diff --git a/.clang-tidy b/.clang-tidy index bc854d57f8a..2ca1402ddf1 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -54,8 +54,6 @@ Checks: '*, -cppcoreguidelines-slicing, -cppcoreguidelines-special-member-functions, - -concurrency-mt-unsafe, - -darwin-*, -fuchsia-*, diff --git a/.github/workflows/tags_stable.yml b/.github/workflows/tags_stable.yml index 01b75039963..9711f7688cb 100644 --- a/.github/workflows/tags_stable.yml +++ b/.github/workflows/tags_stable.yml @@ -13,13 +13,24 @@ on: # yamllint disable-line rule:truthy - 'v*-prestable' - 'v*-stable' - 'v*-lts' + workflow_dispatch: + inputs: + tag: + description: 'Test tag' + required: true + type: string jobs: UpdateVersions: runs-on: [self-hosted, style-checker] steps: + - name: Set test tag + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" - name: Get tag name + if: github.event_name != 'workflow_dispatch' run: | echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV" - name: Check out repository code @@ -44,11 +55,13 @@ jobs: uses: peter-evans/create-pull-request@v3 with: author: "robot-clickhouse " + token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} committer: "robot-clickhouse " commit-message: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }} branch: auto/${{ env.GITHUB_TAG }} delete-branch: true title: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }} + labels: do not test body: | Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }} diff --git a/CHANGELOG.md b/CHANGELOG.md index d7e2684380c..3198c15b15c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ ### Table of Contents +**[ClickHouse release v22.8, 2022-08-18](#228)**
**[ClickHouse release v22.7, 2022-07-21](#227)**
**[ClickHouse release v22.6, 2022-06-16](#226)**
**[ClickHouse release v22.5, 2022-05-19](#225)**
@@ -8,6 +9,148 @@ **[ClickHouse release v22.1, 2022-01-18](#221)**
**[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**
+ +### ClickHouse release 22.8, 2022-08-18 + +#### Backward Incompatible Change +* Extended range of `Date32` and `DateTime64` to support dates from the year 1900 to 2299. In previous versions, the supported interval was only from the year 1925 to 2283. The implementation uses the proleptic Gregorian calendar (which is conformant with [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601):2004 (clause 3.2.1 The Gregorian calendar)) instead of accounting for historical transitions from the Julian to the Gregorian calendar. This change affects implementation-specific behavior for out-of-range arguments. E.g. if in previous versions the value of `1899-01-01` was clamped to `1925-01-01`, in the new version it will be clamped to `1900-01-01`. It also changes the behavior of rounding with `toStartOfInterval` by up to one quarter if you pass `INTERVAL 3 QUARTER`, because the intervals are counted from an implementation-specific point in time. Closes [#28216](https://github.com/ClickHouse/ClickHouse/issues/28216), improves [#38393](https://github.com/ClickHouse/ClickHouse/issues/38393). [#39425](https://github.com/ClickHouse/ClickHouse/pull/39425) ([Roman Vasin](https://github.com/rvasin)). +* Now all relevant dictionary sources respect the `remote_url_allow_hosts` setting. It was already done for HTTP, Cassandra, and Redis; added for ClickHouse, MongoDB, MySQL, and PostgreSQL. The host is checked only for dictionaries created from DDL. [#39184](https://github.com/ClickHouse/ClickHouse/pull/39184) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Make the remote filesystem cache composable, allow certain files (such as `.idx` and `.mrk` files) not to be evicted, delete the old cache version. It is now possible to configure the cache over an Azure blob storage disk, over a Local disk, over a StaticWeb disk, etc. This PR is marked backward incompatible because the cache configuration changes and the config file needs to be updated for the cache to work. The old cache will still be used with the new configuration, and the server will start up fine with the old cache configuration. Closes https://github.com/ClickHouse/ClickHouse/issues/36140. Closes https://github.com/ClickHouse/ClickHouse/issues/37889. ([Kseniia Sumarokova](https://github.com/kssenii)). [#36171](https://github.com/ClickHouse/ClickHouse/pull/36171). + +#### New Feature +* Query parameters can be set in interactive mode as `SET param_abc = 'def'` and transferred via the native protocol as settings (see the sketch after this changelog excerpt). [#39906](https://github.com/ClickHouse/ClickHouse/pull/39906) ([Nikita Taranov](https://github.com/nickitat)). +* A quota key can now be set in the native protocol ([Yakov Olkhovsky](https://github.com/ClickHouse/ClickHouse/pull/39874)). +* Added a setting `exact_rows_before_limit` (0/1). When enabled, ClickHouse will provide the exact value for the `rows_before_limit_at_least` statistic, at the cost of reading the data before the limit completely. This closes [#6613](https://github.com/ClickHouse/ClickHouse/issues/6613). [#25333](https://github.com/ClickHouse/ClickHouse/pull/25333) ([kevin wan](https://github.com/MaxWk)). +* Added support for parallel distributed INSERT SELECT with the `s3Cluster` table function into tables with the `Distributed` and `Replicated` engines [#34670](https://github.com/ClickHouse/ClickHouse/issues/34670). [#39107](https://github.com/ClickHouse/ClickHouse/pull/39107) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add new settings to control schema inference from text formats: - `input_format_try_infer_dates` - try to infer dates from strings.
- `input_format_try_infer_datetimes` - try to infer datetimes from strings. - `input_format_try_infer_integers` - try to infer `Int64` instead of `Float64`. - `input_format_json_try_infer_numbers_from_strings` - try to infer numbers from string fields in JSON formats. [#39186](https://github.com/ClickHouse/ClickHouse/pull/39186) ([Kruglov Pavel](https://github.com/Avogar)). +* Added an option to produce JSON-formatted log output, to allow easier ingestion and querying in log analysis tools. [#39277](https://github.com/ClickHouse/ClickHouse/pull/39277) ([Mallik Hassan](https://github.com/SadiHassan)). +* Add function `nowInBlock` which allows getting the current time during long-running and continuous queries. Closes [#39522](https://github.com/ClickHouse/ClickHouse/issues/39522). Note: there are no `now64InBlock` or `todayInBlock` functions. [#39533](https://github.com/ClickHouse/ClickHouse/pull/39533) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add the ability to specify settings for the `executable()` table function. [#39681](https://github.com/ClickHouse/ClickHouse/pull/39681) ([Constantine Peresypkin](https://github.com/pkit)). +* Implemented automatic conversion of the database engine from `Ordinary` to `Atomic`. Create an empty `convert_ordinary_to_atomic` file in the `flags` directory, and all `Ordinary` databases will be converted automatically on the next server start. Resolves [#39546](https://github.com/ClickHouse/ClickHouse/issues/39546). [#39933](https://github.com/ClickHouse/ClickHouse/pull/39933) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Support `SELECT ... INTO OUTFILE '...' AND STDOUT`. [#37490](https://github.com/ClickHouse/ClickHouse/issues/37490). [#39054](https://github.com/ClickHouse/ClickHouse/pull/39054) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). +* Add formats `PrettyMonoBlock`, `PrettyNoEscapesMonoBlock`, `PrettyCompactNoEscapes`, `PrettyCompactNoEscapesMonoBlock`, `PrettySpaceNoEscapes`, `PrettySpaceMonoBlock`, `PrettySpaceNoEscapesMonoBlock`. [#39646](https://github.com/ClickHouse/ClickHouse/pull/39646) ([Kruglov Pavel](https://github.com/Avogar)). +* Add a new setting `schema_inference_hints` that allows specifying structure hints in schema inference for particular columns. Closes [#39569](https://github.com/ClickHouse/ClickHouse/issues/39569). [#40068](https://github.com/ClickHouse/ClickHouse/pull/40068) ([Kruglov Pavel](https://github.com/Avogar)). + +#### Experimental Feature +* Support the SQL-standard `DELETE FROM` syntax on MergeTree tables and a lightweight delete implementation for the MergeTree family. [#37893](https://github.com/ClickHouse/ClickHouse/pull/37893) ([Jianmei Zhang](https://github.com/zhangjmruc)) ([Alexander Gololobov](https://github.com/davenger)). Note: this new feature does not make ClickHouse an HTAP DBMS. + +#### Performance Improvement +* Improved memory usage during memory-efficient merging of aggregation results. [#39429](https://github.com/ClickHouse/ClickHouse/pull/39429) ([Nikita Taranov](https://github.com/nickitat)). +* Added concurrency control logic to limit the total number of concurrent threads created by queries. [#37558](https://github.com/ClickHouse/ClickHouse/pull/37558) ([Sergei Trifonov](https://github.com/serxa)). Added the `concurrent_threads_soft_limit` parameter to increase performance in case of high QPS by limiting the total number of threads across all queries. [#37285](https://github.com/ClickHouse/ClickHouse/pull/37285) ([Roman Vasin](https://github.com/rvasin)).
+* Add the `SLRU` cache policy for the uncompressed cache and the marks cache. ([Kseniia Sumarokova](https://github.com/kssenii)). [#34651](https://github.com/ClickHouse/ClickHouse/pull/34651) ([alexX512](https://github.com/alexX512)). Decouple the local cache function from the cache algorithm. [#38048](https://github.com/ClickHouse/ClickHouse/pull/38048) ([Han Shukai](https://github.com/KinderRiven)). +* Intel® In-Memory Analytics Accelerator (Intel® IAA) is a hardware accelerator available in the upcoming generation of Intel® Xeon® Scalable processors ("Sapphire Rapids"). Its goal is to speed up common operations in analytics like data (de)compression and filtering. ClickHouse gained the new "DeflateQpl" compression codec which utilizes the Intel® IAA offloading technology to provide a high-performance DEFLATE implementation. The codec uses the [Intel® Query Processing Library (QPL)](https://github.com/intel/qpl) which abstracts access to the hardware accelerator, or to a software fallback if the hardware accelerator is not available. DEFLATE generally provides higher compression rates than ClickHouse's default LZ4 codec and, as a result, less disk I/O and lower main memory consumption. [#36654](https://github.com/ClickHouse/ClickHouse/pull/36654) ([jasperzhu](https://github.com/jinjunzh)). [#39494](https://github.com/ClickHouse/ClickHouse/pull/39494) ([Robert Schulze](https://github.com/rschu1ze)). +* `DISTINCT` in order with `ORDER BY`: deduce the way to sort based on the input stream sort description; skip sorting if the input stream is already sorted. [#38719](https://github.com/ClickHouse/ClickHouse/pull/38719) ([Igor Nikonov](https://github.com/devcrafter)). Improve memory usage (significantly) and query execution time: use `DistinctSortedChunkTransform` for the final distinct when the `DISTINCT` columns match the `ORDER BY` columns (renamed to `DistinctSortedStreamTransform` in `EXPLAIN PIPELINE`), and remove unnecessary allocations in a hot loop in `DistinctSortedChunkTransform`. [#39432](https://github.com/ClickHouse/ClickHouse/pull/39432) ([Igor Nikonov](https://github.com/devcrafter)). Use `DistinctSortedTransform` only when the sort description is applicable to the `DISTINCT` columns, otherwise fall back to the ordinary `DISTINCT` implementation; this allows fewer checks during `DistinctSortedTransform` execution. [#39528](https://github.com/ClickHouse/ClickHouse/pull/39528) ([Igor Nikonov](https://github.com/devcrafter)). Fix: `DistinctSortedTransform` didn't take advantage of sorting; it never cleared the `HashSet` because the clearing columns were detected incorrectly (always empty), so it basically worked as the ordinary `DISTINCT` (`DistinctTransform`). The fix reduces memory usage significantly. [#39538](https://github.com/ClickHouse/ClickHouse/pull/39538) ([Igor Nikonov](https://github.com/devcrafter)). +* Use the local node as the first priority to get the structure of a remote table when executing the `cluster` and similar table functions. [#39440](https://github.com/ClickHouse/ClickHouse/pull/39440) ([Mingliang Pan](https://github.com/liangliangpan)). +* Optimize filtering by numeric columns with an AVX512VBMI2 compress store. [#39633](https://github.com/ClickHouse/ClickHouse/pull/39633) ([Guo Wangyang](https://github.com/guowangy)). For systems with AVX512 VBMI2, this PR improves performance by about 6% for SSB benchmark queries 3.1, 3.2 and 3.3 (SF=100). Tested on a two-socket Intel Icelake Xeon 8380 system.
[#40033](https://github.com/ClickHouse/ClickHouse/pull/40033) ([Robert Schulze](https://github.com/rschu1ze)). +* Optimize index analysis with functional expressions in multi-threaded scenarios. [#39812](https://github.com/ClickHouse/ClickHouse/pull/39812) ([Guo Wangyang](https://github.com/guowangy)). +* Optimizations for complex queries: don't visit the AST for UDFs if none are registered. [#40069](https://github.com/ClickHouse/ClickHouse/pull/40069) ([Raúl Marín](https://github.com/Algunenano)). Optimize `CurrentMemoryTracker` alloc and free. [#40078](https://github.com/ClickHouse/ClickHouse/pull/40078) ([Raúl Marín](https://github.com/Algunenano)). +* Improved Base58 encoding/decoding. [#39292](https://github.com/ClickHouse/ClickHouse/pull/39292) ([Andrey Zvonov](https://github.com/zvonand)). +* Improve the bytes-to-bits mask transform for SSE/AVX/AVX512. [#39586](https://github.com/ClickHouse/ClickHouse/pull/39586) ([Guo Wangyang](https://github.com/guowangy)). + +#### Improvement +* Normalize `AggregateFunction` types and state representations, because optimizations like [#35788](https://github.com/ClickHouse/ClickHouse/pull/35788) will treat `count(not null columns)` as `count()`, which might confuse distributed interpreters with the following error: `Conversion from AggregateFunction(count) to AggregateFunction(count, Int64) is not supported`. [#39420](https://github.com/ClickHouse/ClickHouse/pull/39420) ([Amos Bird](https://github.com/amosbird)). Functions with identical states can now be used in materialized views interchangeably. +* Rework and simplify the `system.backups` table, remove the `internal` column, allow the user to set the ID of the operation, add columns `num_files`, `uncompressed_size`, `compressed_size`, `start_time`, `end_time`. [#39503](https://github.com/ClickHouse/ClickHouse/pull/39503) ([Vitaly Baranov](https://github.com/vitlibar)). +* Improved the structure of the DDL query result table for the `Replicated` database (separate columns with the shard and replica name, a clearer status). `CREATE TABLE ... ON CLUSTER` queries can be normalized on the initiator first if `distributed_ddl_entry_format_version` is set to 3 (the default value); it means that `ON CLUSTER` queries may not work if the initiator does not belong to the cluster specified in the query. Fixes [#37318](https://github.com/ClickHouse/ClickHouse/issues/37318), [#39500](https://github.com/ClickHouse/ClickHouse/issues/39500). Ignore the `ON CLUSTER` clause if the database is `Replicated` and the cluster name equals the database name; related to [#35570](https://github.com/ClickHouse/ClickHouse/issues/35570). Miscellaneous minor fixes for the `Replicated` database engine. Check metadata consistency when starting up a `Replicated` database, and start replica recovery in case of a mismatch between local metadata and the metadata in Keeper. Resolves [#24880](https://github.com/ClickHouse/ClickHouse/issues/24880). [#37198](https://github.com/ClickHouse/ClickHouse/pull/37198) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add `result_rows` and `result_bytes` to progress reports (`X-ClickHouse-Summary`). [#39567](https://github.com/ClickHouse/ClickHouse/pull/39567) ([Raúl Marín](https://github.com/Algunenano)). +* Improve primary key analysis for MergeTree. [#25563](https://github.com/ClickHouse/ClickHouse/pull/25563) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* `timeSlots` now works with `DateTime64`; subsecond duration and slot size are available when working with `DateTime64`.
[#37951](https://github.com/ClickHouse/ClickHouse/pull/37951) ([Andrey Zvonov](https://github.com/zvonand)). +* Added support for `LEFT SEMI` and `LEFT ANTI` direct joins with `EmbeddedRocksDB` tables. [#38956](https://github.com/ClickHouse/ClickHouse/pull/38956) ([Vladimir C](https://github.com/vdimir)). +* Add profile events for fsync operations. [#39179](https://github.com/ClickHouse/ClickHouse/pull/39179) ([Azat Khuzhin](https://github.com/azat)). +* Add a second argument to the ordinary function `file(path[, default])`, which the function returns when the file does not exist. [#39218](https://github.com/ClickHouse/ClickHouse/pull/39218) ([Nikolay Degterinsky](https://github.com/evillique)). +* Some small fixes for reading via HTTP: allow retrying when partial content was received together with a 200 OK response. [#39244](https://github.com/ClickHouse/ClickHouse/pull/39244) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Support queries `CREATE TEMPORARY TABLE ... () AS ...`. [#39462](https://github.com/ClickHouse/ClickHouse/pull/39462) ([Kruglov Pavel](https://github.com/Avogar)). +* Add support for `!`/`*` (exclamation/asterisk) in custom TLDs (`cutToFirstSignificantSubdomainCustom()`/`cutToFirstSignificantSubdomainCustomWithWWW()`/`firstSignificantSubdomainCustom()`). [#39496](https://github.com/ClickHouse/ClickHouse/pull/39496) ([Azat Khuzhin](https://github.com/azat)). +* Add support for TLS connections to NATS. Implements [#39525](https://github.com/ClickHouse/ClickHouse/issues/39525). [#39527](https://github.com/ClickHouse/ClickHouse/pull/39527) ([Constantine Peresypkin](https://github.com/pkit)). +* `clickhouse-obfuscator` (a tool for database obfuscation for testing and load generation) now has the new `--save` and `--load` parameters to work with pre-trained models. This closes [#39534](https://github.com/ClickHouse/ClickHouse/issues/39534). [#39541](https://github.com/ClickHouse/ClickHouse/pull/39541) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix incorrect behavior of log rotation during restart. [#39558](https://github.com/ClickHouse/ClickHouse/pull/39558) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix building aggregate projections when external aggregation is on. Marked as an improvement because the case is rare and there exists an easy workaround via changing settings. This fixes [#39667](https://github.com/ClickHouse/ClickHouse/issues/39667). [#39671](https://github.com/ClickHouse/ClickHouse/pull/39671) ([Amos Bird](https://github.com/amosbird)). +* Allow executing hash functions with arguments of type `Map`. [#39685](https://github.com/ClickHouse/ClickHouse/pull/39685) ([Anton Popov](https://github.com/CurtizJ)). +* Add a configuration parameter to hide addresses in stack traces. It may improve security a little, but generally it is harmful and should not be used. [#39690](https://github.com/ClickHouse/ClickHouse/pull/39690) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Change the prefix size of `AggregateFunctionDistinct` to make sure the nested function's data memory segment is aligned. [#39696](https://github.com/ClickHouse/ClickHouse/pull/39696) ([Pxl](https://github.com/BiteTheDDDDt)). +* Properly escape credentials passed to the `clickhouse-diagnostic` tool. [#39707](https://github.com/ClickHouse/ClickHouse/pull/39707) ([Dale McDiarmid](https://github.com/gingerwizard)). +* ClickHouse Keeper improvement: create a snapshot on exit. It can be controlled with the config `keeper_server.create_snapshot_on_exit`, `true` by default.
[#39755](https://github.com/ClickHouse/ClickHouse/pull/39755) ([Antonio Andelic](https://github.com/antonio2368)). +* Support primary key analysis for `row_policy_filter` and `additional_filter`. It also helps fix issues like [#37454](https://github.com/ClickHouse/ClickHouse/issues/37454). [#39826](https://github.com/ClickHouse/ClickHouse/pull/39826) ([Amos Bird](https://github.com/amosbird)). +* Fix two usability issues in Play UI: it was not pixel-perfect on iPad due to parasitic border radius and margins, and the progress indication did not display after the first query. This closes [#39957](https://github.com/ClickHouse/ClickHouse/issues/39957). This closes [#39960](https://github.com/ClickHouse/ClickHouse/issues/39960). [#39961](https://github.com/ClickHouse/ClickHouse/pull/39961) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Play UI: add row numbers; add cell selection on click; add hysteresis for table cells. [#39962](https://github.com/ClickHouse/ClickHouse/pull/39962) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Play UI: recognize the Tab key in the textarea, but without breaking tab navigation. [#40053](https://github.com/ClickHouse/ClickHouse/pull/40053) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* The client will show server-side elapsed time. This is important for the performance comparison of ClickHouse services in remote datacenters. This closes [#38070](https://github.com/ClickHouse/ClickHouse/issues/38070). See also [this](https://github.com/ClickHouse/ClickBench/blob/main/hardware/benchmark-cloud.sh#L37) for motivation. [#39968](https://github.com/ClickHouse/ClickHouse/pull/39968) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Adds `parseDateTime64BestEffortUS`, `parseDateTime64BestEffortUSOrNull`, `parseDateTime64BestEffortUSOrZero` functions, closing [#37492](https://github.com/ClickHouse/ClickHouse/issues/37492). [#40015](https://github.com/ClickHouse/ClickHouse/pull/40015) ([Tanya Bragin](https://github.com/tbragin)). +* Extend the `system.processors_profile_log` with more information such as input rows. [#40121](https://github.com/ClickHouse/ClickHouse/pull/40121) ([Amos Bird](https://github.com/amosbird)). +* Display server-side time in `clickhouse-benchmark` by default if it is available (since ClickHouse version 22.8). This is needed to correctly compare the performance of clouds. This behavior can be changed with the new `--client-side-time` command line option. Change the `--randomize` command line option from `--randomize 1` to the form without an argument. [#40193](https://github.com/ClickHouse/ClickHouse/pull/40193) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add counters (ProfileEvents) for cases when a query complexity limitation has been set and reached (a separate counter for `overflow_mode` = `break` and `throw`). For example, if you have set up `max_rows_to_read` with `read_overflow_mode = 'break'`, looking at the value of the `OverflowBreak` counter will allow distinguishing incomplete results. [#40205](https://github.com/ClickHouse/ClickHouse/pull/40205) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix memory accounting in case of "Memory limit exceeded" errors (previously, [peak] memory usage took failed allocations into account). [#40249](https://github.com/ClickHouse/ClickHouse/pull/40249) ([Azat Khuzhin](https://github.com/azat)). +* Add metrics for the filesystem cache: `FilesystemCacheSize` and `FilesystemCacheElements`.
[#40260](https://github.com/ClickHouse/ClickHouse/pull/40260) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Support Hadoop secure RPC transfer (`hadoop.rpc.protection=privacy` and `hadoop.rpc.protection=integrity`). [#39411](https://github.com/ClickHouse/ClickHouse/pull/39411) ([michael1589](https://github.com/michael1589)). +* Avoid continuously growing memory consumption of the pattern cache when using the `multi(Fuzzy)Match(Any|AllIndices|AnyIndex)()` functions. [#40264](https://github.com/ClickHouse/ClickHouse/pull/40264) ([Robert Schulze](https://github.com/rschu1ze)). +* Add a cache for schema inference for the file/s3/hdfs/url table functions. Schema inference will now be performed only on the first query to the file; all subsequent queries to the same file will use the schema from the cache if the data hasn't changed. Added the system table `system.schema_inference_cache` with all current schemas in the cache, and the system queries `SYSTEM DROP SCHEMA CACHE [FOR FILE/S3/HDFS/URL]` to drop schemas from the cache. [#38286](https://github.com/ClickHouse/ClickHouse/pull/38286) ([Kruglov Pavel](https://github.com/Avogar)). +* Add support for LARGE_BINARY/LARGE_STRING with Arrow (closes [#32401](https://github.com/ClickHouse/ClickHouse/issues/32401)). [#40293](https://github.com/ClickHouse/ClickHouse/pull/40293) ([Josh Taylor](https://github.com/joshuataylor)). + +#### Build/Testing/Packaging Improvement +* [ClickFiddle](https://fiddle.clickhouse.com/): a new tool for testing ClickHouse versions in read/write mode (**Igor Baliuk**). +* The ClickHouse binary is made self-extracting. [#35775](https://github.com/ClickHouse/ClickHouse/pull/35775) ([Yakov Olkhovskiy, Arthur Filatenkov](https://github.com/yakov-olkhovskiy)). +* Update tzdata to 2022b to support the new timezone changes. See https://github.com/google/cctz/pull/226. Chile's 2022 DST start is delayed from September 4 to September 11. Iran plans to stop observing DST permanently after it falls back on 2022-09-21. There are corrections to the historical time zone of Asia/Tehran in the year 1977: Iran adopted standard time in 1935, not 1946. In 1977 it observed DST from 03-21 23:00 to 10-20 24:00; its 1978 transitions were on 03-24 and 08-05, not 03-20 and 10-20; and its spring 1979 transition was on 05-27, not 03-21 (https://data.iana.org/time-zones/tzdb/NEWS). ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Former packages used to install the systemd.service file to `/etc`. The files there are marked as `conf` and are neither cleaned out nor updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Ensure LSan is effective. [#39430](https://github.com/ClickHouse/ClickHouse/pull/39430) ([Azat Khuzhin](https://github.com/azat)). +* TSAN has issues with clang-14 (https://github.com/google/sanitizers/issues/1552, https://github.com/google/sanitizers/issues/1540), so we build the TSAN binaries with clang-15. [#39450](https://github.com/ClickHouse/ClickHouse/pull/39450) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Remove the option to build ClickHouse tools as separate executable programs. This fixes [#37847](https://github.com/ClickHouse/ClickHouse/issues/37847). [#39520](https://github.com/ClickHouse/ClickHouse/pull/39520) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Small preparations for the build on s390x (which is big-endian).
[#39627](https://github.com/ClickHouse/ClickHouse/pull/39627) ([Harry Lee](https://github.com/HarryLeeIBM)). [#39656](https://github.com/ClickHouse/ClickHouse/pull/39656) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed an endianness issue in BitHelpers for s390x. [#39656](https://github.com/ClickHouse/ClickHouse/pull/39656) ([Harry Lee](https://github.com/HarryLeeIBM)). Implement a piece of code related to SipHash for the s390x architecture (which is not supported by ClickHouse). [#39732](https://github.com/ClickHouse/ClickHouse/pull/39732) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed an endianness issue in the Coordination snapshot code for the s390x architecture (which is not supported by ClickHouse). [#39931](https://github.com/ClickHouse/ClickHouse/pull/39931) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed endianness issues in the Codec code for the s390x architecture (which is not supported by ClickHouse). [#40008](https://github.com/ClickHouse/ClickHouse/pull/40008) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed endianness issues in reading/writing BigEndian binary data in the ReadHelpers and WriteHelpers code for the s390x architecture (which is not supported by ClickHouse). [#40179](https://github.com/ClickHouse/ClickHouse/pull/40179) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Support build with `clang-16` (trunk). This closes [#39949](https://github.com/ClickHouse/ClickHouse/issues/39949). [#40181](https://github.com/ClickHouse/ClickHouse/pull/40181) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Prepare the RISC-V 64 build to run in CI. This is for [#40141](https://github.com/ClickHouse/ClickHouse/issues/40141). [#40197](https://github.com/ClickHouse/ClickHouse/pull/40197) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Simplified the function registration macro interface (`FUNCTION_REGISTER*`) to eliminate the step of adding and calling an extern function in registerFunctions.cpp; it also makes incremental builds of a new function faster. [#38615](https://github.com/ClickHouse/ClickHouse/pull/38615) ([Li Yin](https://github.com/liyinsg)). +* Docker: entrypoint.sh in the docker image now creates and chowns all folders it finds in the config for multi-disk setups [#17717](https://github.com/ClickHouse/ClickHouse/issues/17717). [#39121](https://github.com/ClickHouse/ClickHouse/pull/39121) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). + +#### Bug Fix +* Fix a possible segfault in the `CapnProto` input format. This bug was found and sent through the ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix a very rare case of incorrect behavior of the array subscript operator. This closes [#28720](https://github.com/ClickHouse/ClickHouse/issues/28720). [#40185](https://github.com/ClickHouse/ClickHouse/pull/40185) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix insufficient argument checks for encryption functions (found by the query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix the case when the order of columns can be incorrect if the `IN` operator is used with a table with `ENGINE = Set` containing multiple columns. This fixes [#13014](https://github.com/ClickHouse/ClickHouse/issues/13014).
[#40225](https://github.com/ClickHouse/ClickHouse/pull/40225) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix seeking while reading from an encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix duplicate columns in the join plan. Finally solves [#26809](https://github.com/ClickHouse/ClickHouse/issues/26809). [#40009](https://github.com/ClickHouse/ClickHouse/pull/40009) ([Vladimir C](https://github.com/vdimir)). +* Fixed query hanging for SELECT with ORDER BY WITH FILL with different date/time types. [#37849](https://github.com/ClickHouse/ClickHouse/pull/37849) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fix ORDER BY that matches a projection's ORDER BY (before this fix, it simply returned an unsorted result). [#38725](https://github.com/ClickHouse/ClickHouse/pull/38725) ([Azat Khuzhin](https://github.com/azat)). +* Do not optimise functions in GROUP BY statements if they shadow one of the table columns or expressions. Fixes [#37032](https://github.com/ClickHouse/ClickHouse/issues/37032). [#39103](https://github.com/ClickHouse/ClickHouse/pull/39103) ([Anton Kozlov](https://github.com/tonickkozlov)). +* Fix the wrong table name in logs after RENAME TABLE. This fixes [#38018](https://github.com/ClickHouse/ClickHouse/issues/38018). [#39227](https://github.com/ClickHouse/ClickHouse/pull/39227) ([Amos Bird](https://github.com/amosbird)). +* Fix positional arguments in case of column pruning when optimising the query. Closes [#38433](https://github.com/ClickHouse/ClickHouse/issues/38433). [#39293](https://github.com/ClickHouse/ClickHouse/pull/39293) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix a bug in schema inference in case of empty messages in Protobuf/CapnProto formats that allowed creating a column with an empty `Tuple` type. Closes [#39051](https://github.com/ClickHouse/ClickHouse/issues/39051). Added 2 new settings `input_format_{protobuf/capnproto}_skip_fields_with_unsupported_types_in_schema_inference` that allow skipping fields with unsupported types during schema inference for the Protobuf and CapnProto formats. [#39357](https://github.com/ClickHouse/ClickHouse/pull/39357) ([Kruglov Pavel](https://github.com/Avogar)). +* (Window View is an experimental feature) Fix a segmentation fault on `CREATE WINDOW VIEW .. ON CLUSTER ... INNER`. Closes [#39363](https://github.com/ClickHouse/ClickHouse/issues/39363). [#39384](https://github.com/ClickHouse/ClickHouse/pull/39384) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix `WriteBuffer` finalize when cancelling an insert into a table function (in previous versions it could lead to `std::terminate`). [#39458](https://github.com/ClickHouse/ClickHouse/pull/39458) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix storing of columns of type `Object` in sparse serialization. [#39464](https://github.com/ClickHouse/ClickHouse/pull/39464) ([Anton Popov](https://github.com/CurtizJ)). +* Fix a possible "Not found column in block" exception when using projections. This closes [#39469](https://github.com/ClickHouse/ClickHouse/issues/39469). [#39470](https://github.com/ClickHouse/ClickHouse/pull/39470) ([小路](https://github.com/nicelulu)). +* Fix an exception on a race between DROP and INSERT with materialized views. [#39477](https://github.com/ClickHouse/ClickHouse/pull/39477) ([Azat Khuzhin](https://github.com/azat)).
+* Fix a bug in the Apache Avro library: a data race and a possible heap-buffer-overflow in the Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094). Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix a rare bug in asynchronous reading (with the setting `local_filesystem_read_method='pread_threadpool'`) with enabled `O_DIRECT` (enabled by the setting `min_bytes_to_use_direct_io`). [#39506](https://github.com/ClickHouse/ClickHouse/pull/39506) ([Anton Popov](https://github.com/CurtizJ)). +* (only on FreeBSD) Fixes the "Code: 49. DB::Exception: FunctionFactory: the function name '' is not unique. (LOGICAL_ERROR)" error observed on FreeBSD when starting clickhouse. [#39551](https://github.com/ClickHouse/ClickHouse/pull/39551) ([Alexander Gololobov](https://github.com/davenger)). +* Fix a bug with the recently introduced "maxsplit" argument for `splitByChar`, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)). +* Fix a bug in ASOF JOIN with `enable_optimize_predicate_expression`, close [#37813](https://github.com/ClickHouse/ClickHouse/issues/37813). [#39556](https://github.com/ClickHouse/ClickHouse/pull/39556) ([Vladimir C](https://github.com/vdimir)). +* Fixed the `CREATE/DROP INDEX` query with `ON CLUSTER` or a `Replicated` database and `ReplicatedMergeTree`. It used to be executed on all replicas (causing errors or a stuck DDL queue). Fixes [#39511](https://github.com/ClickHouse/ClickHouse/issues/39511). [#39565](https://github.com/ClickHouse/ClickHouse/pull/39565) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix the "column not found" error for push down with join, close [#39505](https://github.com/ClickHouse/ClickHouse/issues/39505). [#39575](https://github.com/ClickHouse/ClickHouse/pull/39575) ([Vladimir C](https://github.com/vdimir)). +* Fix the wrong `REGEXP_REPLACE` alias. This fixes https://github.com/ClickHouse/ClickBench/issues/9. [#39592](https://github.com/ClickHouse/ClickHouse/pull/39592) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fixed the point of origin for exponential decay window functions to the last value in the window. Previously, decay was calculated by the formula `exp((t - curr_row_t) / decay_length)`, which is incorrect when the right boundary of the window is not `CURRENT ROW`. It was changed to `exp((t - last_row_t) / decay_length)`. There is no change in results for windows with `ROWS BETWEEN (smth) AND CURRENT ROW`. [#39593](https://github.com/ClickHouse/ClickHouse/pull/39593) ([Vladimir Chebotaryov](https://github.com/quickhouse)). +* Fix Decimal division overflow, which can be detected based on the operands' scale. [#39600](https://github.com/ClickHouse/ClickHouse/pull/39600) ([Andrey Zvonov](https://github.com/zvonand)). +* Fix the settings `output_format_arrow_string_as_string` and `output_format_arrow_low_cardinality_as_dictionary` so that they work in combination. Closes [#39624](https://github.com/ClickHouse/ClickHouse/issues/39624). [#39647](https://github.com/ClickHouse/ClickHouse/pull/39647) ([Kruglov Pavel](https://github.com/Avogar)). +* Fixed a bug in default database resolution in distributed table reads. [#39674](https://github.com/ClickHouse/ClickHouse/pull/39674) ([Anton Kozlov](https://github.com/tonickkozlov)).
+* (Only with the obsolete Ordinary databases) A SELECT might read the data of a dropped table if the cache for mmap I/O is used, the database engine is Ordinary, and a new table was created with the same name as the dropped one. This is fixed. [#39708](https://github.com/ClickHouse/ClickHouse/pull/39708) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix the possible error `Invalid column type for ColumnUnique::insertRangeFrom. Expected String, got ColumnLowCardinality`. Fixes [#38460](https://github.com/ClickHouse/ClickHouse/issues/38460). [#39716](https://github.com/ClickHouse/ClickHouse/pull/39716) ([Arthur Passos](https://github.com/arthurpassos)). +* Field names in the `meta` section of the JSON format were erroneously double-escaped. This closes [#39693](https://github.com/ClickHouse/ClickHouse/issues/39693). [#39747](https://github.com/ClickHouse/ClickHouse/pull/39747) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix wrong index analysis with tuples and the operator `IN`, which could lead to a wrong query result. [#39752](https://github.com/ClickHouse/ClickHouse/pull/39752) ([Anton Popov](https://github.com/CurtizJ)). +* Fix `EmbeddedRocksDB` tables filtering by key using params. [#39757](https://github.com/ClickHouse/ClickHouse/pull/39757) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix the error `Invalid number of columns in chunk pushed to OutputPort`, which was caused by the ARRAY JOIN optimization. Fixes [#39164](https://github.com/ClickHouse/ClickHouse/issues/39164). [#39799](https://github.com/ClickHouse/ClickHouse/pull/39799) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* A workaround for a bug in the Linux kernel. Fix the `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel versions 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)). +* (Only on NFS) Fix broken NFS mkdir for root-squashed volumes. [#39898](https://github.com/ClickHouse/ClickHouse/pull/39898) ([Constantine Peresypkin](https://github.com/pkit)). +* Remove dictionaries from Prometheus metrics on DETACH/DROP. [#39926](https://github.com/ClickHouse/ClickHouse/pull/39926) ([Azat Khuzhin](https://github.com/azat)). +* Fix reads of StorageFile with virtual columns. Closes [#39907](https://github.com/ClickHouse/ClickHouse/issues/39907). [#39943](https://github.com/ClickHouse/ClickHouse/pull/39943) ([flynn](https://github.com/ucasfl)). +* Fix big memory usage during fetches. Fixes [#39915](https://github.com/ClickHouse/ClickHouse/issues/39915). [#39990](https://github.com/ClickHouse/ClickHouse/pull/39990) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* (experimental feature) Fix a `hashId` crash and the salt parameter not being used. [#40002](https://github.com/ClickHouse/ClickHouse/pull/40002) ([Raúl Marín](https://github.com/Algunenano)). +* The `EXCEPT` and `INTERSECT` operators could lead to a crash if a specific combination of constant and non-constant columns was used. [#40020](https://github.com/ClickHouse/ClickHouse/pull/40020) ([Duc Canh Le](https://github.com/canhld94)). +* Fixed "Part directory doesn't exist" and "`tmp_` ... No such file or directory" errors during a very slow INSERT or a very long merge/mutation.
Also fixed an issue that could cause some replication queue entries to get stuck without any errors or warnings in logs if a previous attempt to fetch a part failed but the `tmp-fetch_` directory was not cleaned up. [#40031](https://github.com/ClickHouse/ClickHouse/pull/40031) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix rare cases of parsing of arrays of tuples in the `Values` format. [#40034](https://github.com/ClickHouse/ClickHouse/pull/40034) ([Anton Popov](https://github.com/CurtizJ)). +* Fix `ArrowColumn` format `Dictionary(X)` and `Dictionary(Nullable(X))` conversion to ClickHouse `LowCardinality(X)` and `LowCardinality(Nullable(X))` respectively. [#40037](https://github.com/ClickHouse/ClickHouse/pull/40037) ([Arthur Passos](https://github.com/arthurpassos)). +* Fix a potential deadlock in writing to S3 during task scheduling failure. [#40070](https://github.com/ClickHouse/ClickHouse/pull/40070) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix a bug in collectFilesToSkip() by adding the correct file extension (`.idx` or `.idx2`) for indexes to be recalculated, avoiding wrong hard links. Fixed [#39896](https://github.com/ClickHouse/ClickHouse/issues/39896). [#40095](https://github.com/ClickHouse/ClickHouse/pull/40095) ([Jianmei Zhang](https://github.com/zhangjmruc)). +* A fix for reverse DNS resolution. [#40134](https://github.com/ClickHouse/ClickHouse/pull/40134) ([Arthur Passos](https://github.com/arthurpassos)). +* Fix an unexpected result of `arrayDifference` for `Array(UInt32)`. [#40211](https://github.com/ClickHouse/ClickHouse/pull/40211) ([Duc Canh Le](https://github.com/canhld94)). + + ### ClickHouse release 22.7, 2022-07-21 #### Upgrade Notes @@ -258,7 +401,7 @@ * Allows providing `NULL`/`NOT NULL` right after type in column declaration. [#37337](https://github.com/ClickHouse/ClickHouse/pull/37337) ([Igor Nikonov](https://github.com/devcrafter)). * optimize file segment PARTIALLY_DOWNLOADED get read buffer. [#37338](https://github.com/ClickHouse/ClickHouse/pull/37338) ([xiedeyantu](https://github.com/xiedeyantu)). * Try to improve short circuit functions processing to fix problems with stress tests. [#37384](https://github.com/ClickHouse/ClickHouse/pull/37384) ([Kruglov Pavel](https://github.com/Avogar)). -* Closes [#37395](https://github.com/ClickHouse/ClickHouse/issues/37395). [#37415](https://github.com/ClickHouse/ClickHouse/pull/37415) ([Memo](https://github.com/Joeywzr)). +* Generate multiple columns with UUID (generateUUIDv4(1), generateUUIDv4(2)) [#37395](https://github.com/ClickHouse/ClickHouse/issues/37395). [#37415](https://github.com/ClickHouse/ClickHouse/pull/37415) ([Memo](https://github.com/Joeywzr)). * Fix extremely rare deadlock during part fetch in zero-copy replication. Fixes [#37423](https://github.com/ClickHouse/ClickHouse/issues/37423). [#37424](https://github.com/ClickHouse/ClickHouse/pull/37424) ([metahys](https://github.com/metahys)). * Don't allow to create storage with unknown data format. [#37450](https://github.com/ClickHouse/ClickHouse/pull/37450) ([Kruglov Pavel](https://github.com/Avogar)). * Set `global_memory_usage_overcommit_max_wait_microseconds` default value to 5 seconds. Add info about `OvercommitTracker` to OOM exception message. Add `MemoryOvercommitWaitTimeMicroseconds` profile event. [#37460](https://github.com/ClickHouse/ClickHouse/pull/37460) ([Dmitry Novik](https://github.com/novikd)).
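As a hedged illustration of the query-parameter feature from the 22.8 New Feature list above (a minimal sketch; the table and parameter names here are hypothetical, not taken from the PR):

```sql
-- Set a query parameter interactively in clickhouse-client (new in 22.8);
-- it is transferred to the server via the native protocol like an ordinary setting.
SET param_target_date = '2022-08-18';

-- Reference the parameter with the {name:Type} substitution syntax.
SELECT count()
FROM hits  -- hypothetical table
WHERE event_date = {target_date:Date};
```

Before this change, parameters had to be supplied externally, e.g. via the `--param_target_date` command-line option of `clickhouse-client` or a `param_target_date` URL parameter of the HTTP interface.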
diff --git a/CMakeLists.txt b/CMakeLists.txt index e3eff050015..dbbec2a600d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -164,7 +164,6 @@ if (HAS_RESERVED_IDENTIFIER) add_compile_definitions (HAS_RESERVED_IDENTIFIER) endif () -# If turned `ON`, assumes the user has either the system GTest library or the bundled one. option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON) option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF) @@ -200,8 +199,8 @@ endif () option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold linker.") if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE") - # Can be lld or ld-lld. - if (LINKER_NAME MATCHES "lld$") + # Can be lld or ld-lld or lld-13 or /path/to/lld. + if (LINKER_NAME MATCHES "lld") set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index") set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gdb-index") message (STATUS "Adding .gdb-index via --gdb-index linker option.") @@ -246,7 +245,8 @@ else () endif () # Create BuildID when using lld. For other linkers it is created by default. -if (LINKER_NAME MATCHES "lld$") +# (NOTE: LINKER_NAME can be either path or name, and in different variants) +if (LINKER_NAME MATCHES "lld") # SHA1 is not cryptographically secure but it is the best what lld is offering. set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id=sha1") endif () @@ -600,6 +600,8 @@ if (NATIVE_BUILD_TARGETS COMMAND ${CMAKE_COMMAND} "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}" "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}" + "-DENABLE_CCACHE=${ENABLE_CCACHE}" + "-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}" ${CMAKE_SOURCE_DIR} WORKING_DIRECTORY "${NATIVE_BUILD_DIR}" COMMAND_ECHO STDOUT) diff --git a/PreLoad.cmake b/PreLoad.cmake index 95f65b85f7f..b11ab080430 100644 --- a/PreLoad.cmake +++ b/PreLoad.cmake @@ -7,12 +7,8 @@ # How to install Ninja on Ubuntu: # sudo apt-get install ninja-build -# CLion does not support Ninja -# You can add your vote on CLion task tracker: -# https://youtrack.jetbrains.com/issue/CPP-2659 -# https://youtrack.jetbrains.com/issue/CPP-870 -if (NOT DEFINED ENV{CLION_IDE} AND NOT DEFINED ENV{XCODE_IDE}) +if (NOT DEFINED ENV{XCODE_IDE}) find_program(NINJA_PATH ninja) if (NINJA_PATH) set(CMAKE_GENERATOR "Ninja" CACHE INTERNAL "") diff --git a/SECURITY.md b/SECURITY.md index 4bb6d9d0b3b..fb6caa92cb8 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -10,9 +10,10 @@ The following versions of ClickHouse server are currently being supported with s | Version | Supported | |:-|:-| +| 22.8 | ✔️ | | 22.7 | ✔️ | | 22.6 | ✔️ | -| 22.5 | ✔️ | +| 22.5 | ❌ | | 22.4 | ❌ | | 22.3 | ✔️ | | 22.2 | ❌ | @@ -21,7 +22,7 @@ The following versions of ClickHouse server are currently being supported with s | 21.11 | ❌ | | 21.10 | ❌ | | 21.9 | ❌ | -| 21.8 | ✔️ | +| 21.8 | ❌ | | 21.7 | ❌ | | 21.6 | ❌ | | 21.5 | ❌ | diff --git a/base/base/ReplxxLineReader.cpp b/base/base/ReplxxLineReader.cpp index bd26ec69c89..b7c18110503 100644 --- a/base/base/ReplxxLineReader.cpp +++ b/base/base/ReplxxLineReader.cpp @@ -27,7 +27,7 @@ void trim(String & s) std::string getEditor() { - const char * editor = std::getenv("EDITOR"); + const char * editor = std::getenv("EDITOR"); // NOLINT(concurrency-mt-unsafe) if (!editor || !*editor) editor = "vim"; @@ -76,7 +76,7 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx) if (!in) { rx.print("Cannot open %s reading (for conversion): %s\n", - path.c_str(), 
errnoToString(errno).c_str()); + path.c_str(), errnoToString().c_str()); return; } @@ -84,7 +84,7 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx) if (getline(in, line).bad()) { rx.print("Cannot read from %s (for conversion): %s\n", - path.c_str(), errnoToString(errno).c_str()); + path.c_str(), errnoToString().c_str()); return; } @@ -113,7 +113,7 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx) if (!out) { rx.print("Cannot open %s for writing (for conversion): %s\n", - path.c_str(), errnoToString(errno).c_str()); + path.c_str(), errnoToString().c_str()); return; } @@ -151,7 +151,7 @@ ReplxxLineReader::ReplxxLineReader( history_file_fd = open(history_file_path.c_str(), O_RDWR); if (history_file_fd < 0) { - rx.print("Open of history file failed: %s\n", errnoToString(errno).c_str()); + rx.print("Open of history file failed: %s\n", errnoToString().c_str()); } else { @@ -159,18 +159,18 @@ ReplxxLineReader::ReplxxLineReader( if (flock(history_file_fd, LOCK_SH)) { - rx.print("Shared lock of history file failed: %s\n", errnoToString(errno).c_str()); + rx.print("Shared lock of history file failed: %s\n", errnoToString().c_str()); } else { if (!rx.history_load(history_file_path)) { - rx.print("Loading history failed: %s\n", errnoToString(errno).c_str()); + rx.print("Loading history failed: %s\n", errnoToString().c_str()); } if (flock(history_file_fd, LOCK_UN)) { - rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str()); + rx.print("Unlock of history file failed: %s\n", errnoToString().c_str()); } } } @@ -225,7 +225,7 @@ ReplxxLineReader::ReplxxLineReader( ReplxxLineReader::~ReplxxLineReader() { if (close(history_file_fd)) - rx.print("Close of history file failed: %s\n", errnoToString(errno).c_str()); + rx.print("Close of history file failed: %s\n", errnoToString().c_str()); } LineReader::InputStatus ReplxxLineReader::readOneLine(const String & prompt) @@ -250,7 +250,7 @@ void ReplxxLineReader::addToHistory(const String & line) // and that is why flock() is added here. 
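// (An exclusive flock() is taken around the read-modify-write of the history file below
// and released after history_save(), so concurrent client processes do not interleave writes.)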
bool locked = false; if (flock(history_file_fd, LOCK_EX)) - rx.print("Lock of history file failed: %s\n", errnoToString(errno).c_str()); + rx.print("Lock of history file failed: %s\n", errnoToString().c_str()); else locked = true; @@ -258,10 +258,10 @@ void ReplxxLineReader::addToHistory(const String & line) // flush changes to the disk if (!rx.history_save(history_file_path)) - rx.print("Saving history failed: %s\n", errnoToString(errno).c_str()); + rx.print("Saving history failed: %s\n", errnoToString().c_str()); if (locked && 0 != flock(history_file_fd, LOCK_UN)) - rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str()); + rx.print("Unlock of history file failed: %s\n", errnoToString().c_str()); } /// See comments in ShellCommand::executeImpl() @@ -275,7 +275,7 @@ int ReplxxLineReader::executeEditor(const std::string & path) static void * real_vfork = dlsym(RTLD_DEFAULT, "vfork"); if (!real_vfork) { - rx.print("Cannot find symbol vfork in myself: %s\n", errnoToString(errno).c_str()); + rx.print("Cannot find symbol vfork in myself: %s\n", errnoToString().c_str()); return -1; } @@ -283,7 +283,7 @@ int ReplxxLineReader::executeEditor(const std::string & path) if (-1 == pid) { - rx.print("Cannot vfork: %s\n", errnoToString(errno).c_str()); + rx.print("Cannot vfork: %s\n", errnoToString().c_str()); return -1; } @@ -292,11 +292,11 @@ int ReplxxLineReader::executeEditor(const std::string & path) { sigset_t mask; sigemptyset(&mask); - sigprocmask(0, nullptr, &mask); - sigprocmask(SIG_UNBLOCK, &mask, nullptr); + sigprocmask(0, nullptr, &mask); // NOLINT(concurrency-mt-unsafe) // ok in newly created process + sigprocmask(SIG_UNBLOCK, &mask, nullptr); // NOLINT(concurrency-mt-unsafe) // ok in newly created process execvp(editor.c_str(), argv); - rx.print("Cannot execute %s: %s\n", editor.c_str(), errnoToString(errno).c_str()); + rx.print("Cannot execute %s: %s\n", editor.c_str(), errnoToString().c_str()); _exit(-1); } @@ -309,7 +309,7 @@ int ReplxxLineReader::executeEditor(const std::string & path) if (errno == EINTR) continue; - rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str()); + rx.print("Cannot waitpid: %s\n", errnoToString().c_str()); return -1; } else @@ -324,7 +324,7 @@ void ReplxxLineReader::openEditor() int fd = ::mkstemps(filename, 4); if (-1 == fd) { - rx.print("Cannot create temporary file to edit query: %s\n", errnoToString(errno).c_str()); + rx.print("Cannot create temporary file to edit query: %s\n", errnoToString().c_str()); return; } @@ -338,7 +338,7 @@ void ReplxxLineReader::openEditor() ssize_t res = ::write(fd, begin + bytes_written, offset - bytes_written); if ((-1 == res || 0 == res) && errno != EINTR) { - rx.print("Cannot write to temporary query file %s: %s\n", filename, errnoToString(errno).c_str()); + rx.print("Cannot write to temporary query file %s: %s\n", filename, errnoToString().c_str()); break; } bytes_written += res; @@ -346,7 +346,7 @@ void ReplxxLineReader::openEditor() if (0 != ::close(fd)) { - rx.print("Cannot close temporary query file %s: %s\n", filename, errnoToString(errno).c_str()); + rx.print("Cannot close temporary query file %s: %s\n", filename, errnoToString().c_str()); return; } @@ -364,7 +364,7 @@ void ReplxxLineReader::openEditor() } catch (...) 
{ - rx.print("Cannot read from temporary query file %s: %s\n", filename, errnoToString(errno).c_str()); + rx.print("Cannot read from temporary query file %s: %s\n", filename, errnoToString().c_str()); return; } } @@ -373,7 +373,7 @@ void ReplxxLineReader::openEditor() enableBracketedPaste(); if (0 != ::unlink(filename)) - rx.print("Cannot remove temporary query file %s: %s\n", filename, errnoToString(errno).c_str()); + rx.print("Cannot remove temporary query file %s: %s\n", filename, errnoToString().c_str()); } void ReplxxLineReader::enableBracketedPaste() diff --git a/base/base/StringRef.h b/base/base/StringRef.h index 459172eb107..14ca2530a33 100644 --- a/base/base/StringRef.h +++ b/base/base/StringRef.h @@ -55,10 +55,9 @@ struct StringRef bool empty() const { return size == 0; } std::string toString() const { return std::string(data, size); } - explicit operator std::string() const { return toString(); } - std::string_view toView() const { return std::string_view(data, size); } + std::string_view toView() const { return std::string_view(data, size); } constexpr explicit operator std::string_view() const { return std::string_view(data, size); } }; diff --git a/base/base/errnoToString.cpp b/base/base/errnoToString.cpp index c6d1139a9de..b73979f9e07 100644 --- a/base/base/errnoToString.cpp +++ b/base/base/errnoToString.cpp @@ -3,10 +3,11 @@ #include -std::string errnoToString(int code, int the_errno) +std::string errnoToString(int the_errno) { const size_t buf_size = 128; char buf[buf_size]; + #ifndef _GNU_SOURCE int rc = strerror_r(the_errno, buf, buf_size); #ifdef OS_DARWIN @@ -15,7 +16,7 @@ std::string errnoToString(int code, int the_errno) if (rc != 0) #endif { - std::string tmp = std::to_string(code); + std::string tmp = std::to_string(the_errno); const char * code_str = tmp.c_str(); const char * unknown_message = "Unknown error "; strcpy(buf, unknown_message); @@ -23,7 +24,6 @@ std::string errnoToString(int code, int the_errno) } return fmt::format("errno: {}, strerror: {}", the_errno, buf); #else - (void)code; return fmt::format("errno: {}, strerror: {}", the_errno, strerror_r(the_errno, buf, sizeof(buf))); #endif } diff --git a/base/base/errnoToString.h b/base/base/errnoToString.h index fd5f81ec2c7..50cff786f37 100644 --- a/base/base/errnoToString.h +++ b/base/base/errnoToString.h @@ -3,4 +3,4 @@ #include #include -std::string errnoToString(int code, int the_errno = errno); +std::string errnoToString(int the_errno = errno); diff --git a/base/base/setTerminalEcho.cpp b/base/base/setTerminalEcho.cpp index 2f478f10933..759dca19119 100644 --- a/base/base/setTerminalEcho.cpp +++ b/base/base/setTerminalEcho.cpp @@ -16,7 +16,7 @@ void setTerminalEcho(bool enable) struct termios tty{}; if (0 != tcgetattr(STDIN_FILENO, &tty)) - throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString(errno)); + throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString()); if (enable) tty.c_lflag |= ECHO; @@ -24,5 +24,5 @@ void setTerminalEcho(bool enable) tty.c_lflag &= ~ECHO; if (0 != tcsetattr(STDIN_FILENO, TCSANOW, &tty)) - throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString(errno)); + throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString()); } diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index edc7805150b..5999a089331 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: has nothing common with 
DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. -SET(VERSION_REVISION 54465) +SET(VERSION_REVISION 54466) SET(VERSION_MAJOR 22) -SET(VERSION_MINOR 8) +SET(VERSION_MINOR 9) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH f4f05ec786a8b8966dd0ea2a2d7e39a8c7db24f4) -SET(VERSION_DESCRIBE v22.8.1.1-testing) -SET(VERSION_STRING 22.8.1.1) +SET(VERSION_GITHASH 09a2ff88435f79e5279745bbe1dc0e5e401df38d) +SET(VERSION_DESCRIBE v22.9.1.1-testing) +SET(VERSION_STRING 22.9.1.1) # end of autochange diff --git a/cmake/cpu_features.cmake b/cmake/cpu_features.cmake index f1f6dfb9a9c..1fc3c2db804 100644 --- a/cmake/cpu_features.cmake +++ b/cmake/cpu_features.cmake @@ -16,7 +16,7 @@ option (ENABLE_SSE41 "Use SSE4.1 instructions on x86_64" 1) option (ENABLE_SSE42 "Use SSE4.2 instructions on x86_64" 1) option (ENABLE_PCLMULQDQ "Use pclmulqdq instructions on x86_64" 1) option (ENABLE_POPCNT "Use popcnt instructions on x86_64" 1) -option (ENABLE_AVX "Use AVX instructions on x86_64" 1) +option (ENABLE_AVX "Use AVX instructions on x86_64" 0) option (ENABLE_AVX2 "Use AVX2 instructions on x86_64" 0) option (ENABLE_AVX512 "Use AVX512 instructions on x86_64" 0) option (ENABLE_AVX512_VBMI "Use AVX512_VBMI instruction on x86_64 (depends on ENABLE_AVX512)" 0) diff --git a/contrib/NuRaft b/contrib/NuRaft index 1b0af760b35..33f60f961d4 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit 1b0af760b3506b8e35b50cb7df098cbad5064ff2 +Subproject commit 33f60f961d4914441b684af43e9e5535078ba54b diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 74919bb2100..ba0a27c9801 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -78,6 +78,7 @@ RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \ && apt-get update \ && apt-get install \ clang-15 \ + llvm-15 \ clang-tidy-15 \ --yes --no-install-recommends \ && apt-get clean diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 7c0108f8b2c..b9e8b89cd92 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -26,7 +26,7 @@ ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image # from debs created by CI build, for example: -# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852 +# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://..." -t ... 
ARG deb_location_url="" # set non-empty single_binary_location_url to create docker image diff --git a/docker/server/entrypoint.sh b/docker/server/entrypoint.sh index 89dd501bf38..d4da5f0f38c 100755 --- a/docker/server/entrypoint.sh +++ b/docker/server/entrypoint.sh @@ -107,6 +107,13 @@ fi if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then # port is needed to check if clickhouse-server is ready for connections HTTP_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=http_port)" + HTTPS_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=https_port)" + + if [ -n "$HTTP_PORT" ]; then + URL="http://127.0.0.1:$HTTP_PORT/ping" + else + URL="https://127.0.0.1:$HTTPS_PORT/ping" + fi # Listen only on localhost until the initialization is done /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 & @@ -115,7 +122,7 @@ if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then # check if clickhouse is ready to accept connections # will try to send ping clickhouse via http_port (max 12 retries by default, with 1 sec timeout and 1 sec delay between retries) tries=${CLICKHOUSE_INIT_TIMEOUT:-12} - while ! wget --spider -T 1 -q "http://127.0.0.1:$HTTP_PORT/ping" 2>/dev/null; do + while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do if [ "$tries" -le "0" ]; then echo >&2 'ClickHouse init process failed.' exit 1 diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 392d8110576..11ddb0bd2d3 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -12,7 +12,7 @@ stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" echo "$script_dir" repo_dir=ch -BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-14_debug_none_bundled_unsplitted_disable_False_binary"} +BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-14_debug_none_unsplitted_disable_False_binary"} BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"} function clone diff --git a/docker/test/keeper-jepsen/run.sh b/docker/test/keeper-jepsen/run.sh index 460c0db54c3..c43e6b2c54d 100644 --- a/docker/test/keeper-jepsen/run.sh +++ b/docker/test/keeper-jepsen/run.sh @@ -2,7 +2,7 @@ set -euo pipefail -CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-14_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"} +CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-14_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"} CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""} diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 3b0b7a4d95a..d3d7084f37f 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -284,13 +284,21 @@ function run_tests # Use awk because bash doesn't support floating point arithmetic. profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 
10 : 0) }") + if [ "$(grep -c $(basename $test) changed-test-definitions.txt)" -gt 0 ] + then + # Run all queries from changed test files to ensure that all new queries will be tested. + max_queries=0 + else + max_queries=$CHPC_MAX_QUERIES + fi + ( set +x argv=( --host localhost localhost --port "$LEFT_SERVER_PORT" "$RIGHT_SERVER_PORT" --runs "$CHPC_RUNS" - --max-queries "$CHPC_MAX_QUERIES" + --max-queries "$max_queries" --profile-seconds "$profile_seconds" "$test" diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 4e7555ea3a4..72b0eb5bda1 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -88,13 +88,15 @@ sleep 5 function run_tests() { set -x - # We can have several additional options so we path them as array because it's - # more idiologically correct. + # We can have several additional options so we pass them as an array because it is more ideologically correct. read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}" + HIGH_LEVEL_COVERAGE=YES + # Use random order in flaky check if [ "$NUM_TRIES" -gt "1" ]; then ADDITIONAL_OPTIONS+=('--order=random') + HIGH_LEVEL_COVERAGE=NO fi if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then @@ -117,12 +119,17 @@ function run_tests() ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_NUM") ADDITIONAL_OPTIONS+=('--run-by-hash-total') ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_TOTAL") + HIGH_LEVEL_COVERAGE=NO fi if [[ -n "$USE_DATABASE_ORDINARY" ]] && [[ "$USE_DATABASE_ORDINARY" -eq 1 ]]; then ADDITIONAL_OPTIONS+=('--db-engine=Ordinary') fi + if [[ "${HIGH_LEVEL_COVERAGE}" = "YES" ]]; then + ADDITIONAL_OPTIONS+=('--report-coverage') + fi + set +e clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \ --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \ diff --git a/docker/test/stateless/setup_hdfs_minicluster.sh b/docker/test/stateless/setup_hdfs_minicluster.sh index 128db96d694..6671e73562a 100755 --- a/docker/test/stateless/setup_hdfs_minicluster.sh +++ b/docker/test/stateless/setup_hdfs_minicluster.sh @@ -1,4 +1,5 @@ #!/bin/bash +# shellcheck disable=SC2024 set -e -x -a -u @@ -9,7 +10,7 @@ cd hadoop-3.3.1 export JAVA_HOME=/usr mkdir -p target/test/data chown clickhouse ./target/test/data -sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 & +sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/garbage.log 2>&1 & while ! nc -z localhost 12222; do sleep 1 diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 62ab1d73668..5f57822386a 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -105,12 +105,16 @@ EOL function stop() { + local pid + # Preserve the pid, since the server can hang after the PID file is deleted. + pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)" + clickhouse stop --do-not-kill && return # We failed to stop the server with SIGTERM. Maybe it hang, let's collect stacktraces.
kill -TERM "$(pidof gdb)" ||: sleep 5 echo "thread apply all backtrace (on stop)" >> /test_output/gdb.log - gdb -batch -ex 'thread apply all backtrace' -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log + gdb -batch -ex 'thread apply all backtrace' -p "$pid" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log clickhouse stop --force } @@ -304,7 +308,6 @@ else rm -rf /var/lib/clickhouse/* # Make BC check more funny by forcing Ordinary engine for system database - # New version will try to convert it to Atomic on startup mkdir /var/lib/clickhouse/metadata echo "ATTACH DATABASE system ENGINE=Ordinary" > /var/lib/clickhouse/metadata/system.sql @@ -314,16 +317,13 @@ else # Start server from previous release configure - # Avoid "Setting allow_deprecated_database_ordinary is neither a builtin setting..." - rm -f /etc/clickhouse-server/users.d/database_ordinary.xml ||: + # Avoid "Setting s3_check_objects_after_upload is neither a builtin setting..." + rm -f /etc/clickhouse-server/users.d/enable_blobs_check.xml ||: # Remove s3 related configs to avoid "there is no disk type `cache`" rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||: rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||: - # Disable aggressive cleanup of tmp dirs (it worked incorrectly before 22.8) - rm -f /etc/clickhouse-server/config.d/merge_tree_old_dirs_cleanup.xml ||: - start clickhouse-client --query="SELECT 'Server version: ', version()" @@ -388,6 +388,7 @@ else -e "TABLE_IS_READ_ONLY" \ -e "Code: 1000, e.code() = 111, Connection refused" \ -e "UNFINISHED" \ + -e "NETLINK_ERROR" \ -e "Renaming unexpected part" \ -e "PART_IS_TEMPORARILY_LOCKED" \ -e "and a merge is impossible: we didn't find" \ @@ -445,6 +446,13 @@ else [ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt fi +dmesg -T > /test_output/dmesg.log + +# OOM in dmesg -- those are real +grep -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE' /test_output/dmesg.log \ + && echo -e 'OOM in dmesg\tFAIL' >> /test_output/test_results.tsv \ + || echo -e 'No OOM in dmesg\tOK' >> /test_output/test_results.tsv + tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||: mv /var/log/clickhouse-server/stderr.log /test_output/ @@ -466,5 +474,3 @@ for core in core.*; do pigz $core mv $core.gz /test_output/ done - -dmesg -T > /test_output/dmesg.log diff --git a/docs/_includes/install/tgz.sh b/docs/_includes/install/tgz.sh index d6d7cd8bc36..e42353f7b48 100644 --- a/docs/_includes/install/tgz.sh +++ b/docs/_includes/install/tgz.sh @@ -14,8 +14,6 @@ do || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz" done -exit 0 - tar -xzvf "clickhouse-common-static-$LATEST_VERSION-${ARCH}.tgz" \ || tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz" sudo "clickhouse-common-static-$LATEST_VERSION/install/doinst.sh" @@ -26,7 +24,7 @@ sudo "clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh" tar -xzvf "clickhouse-server-$LATEST_VERSION-${ARCH}.tgz" \ || tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz" -sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" +sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" configure sudo /etc/init.d/clickhouse-server start tar -xzvf "clickhouse-client-$LATEST_VERSION-${ARCH}.tgz" \ diff --git a/docs/changelogs/v22.6.6.16-stable.md b/docs/changelogs/v22.6.6.16-stable.md new file mode 100644 index 
00000000000..7d54a9af9d2 --- /dev/null +++ b/docs/changelogs/v22.6.6.16-stable.md @@ -0,0 +1,23 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.6.6.16-stable (d2a33ebc822) FIXME as compared to v22.6.5.22-stable (47ca5f14a34) + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#40162](https://github.com/ClickHouse/ClickHouse/issues/40162): Fix HashMethodOneNumber getting a wrong key value when the column is const. [#40020](https://github.com/ClickHouse/ClickHouse/pull/40020) ([Duc Canh Le](https://github.com/canhld94)). +* Backported in [#40124](https://github.com/ClickHouse/ClickHouse/issues/40124): Fix bug in collectFilesToSkip() by adding the correct file extension (.idx or .idx2) for indexes to be recalculated, avoiding wrong hard links. Fixed [#39896](https://github.com/ClickHouse/ClickHouse/issues/39896). [#40095](https://github.com/ClickHouse/ClickHouse/pull/40095) ([Jianmei Zhang](https://github.com/zhangjmruc)). +* Backported in [#40209](https://github.com/ClickHouse/ClickHouse/issues/40209): Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#40281](https://github.com/ClickHouse/ClickHouse/issues/40281): Fix possible segfault in CapnProto input format. This bug was found and sent through the ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#40489](https://github.com/ClickHouse/ClickHouse/issues/40489): Fix rare bug with column TTL for the MergeTree engine family: in case of repeated vertical merge the error `Cannot unlink file ColumnName.bin ... No such file or directory.` could happen. [#40346](https://github.com/ClickHouse/ClickHouse/pull/40346) ([alesapin](https://github.com/alesapin)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* fix heap buffer overflow by limiting http chunk size [#40292](https://github.com/ClickHouse/ClickHouse/pull/40292) ([Sema Checherinda](https://github.com/CheSema)). +* Reduce changelog verbosity in CI [#40360](https://github.com/ClickHouse/ClickHouse/pull/40360) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* DNSResolver remove AI_V4MAPPED, AI_ALL hints [#40502](https://github.com/ClickHouse/ClickHouse/pull/40502) ([Maksim Kita](https://github.com/kitaisreal)). + diff --git a/docs/changelogs/v22.7.4.16-stable.md b/docs/changelogs/v22.7.4.16-stable.md new file mode 100644 index 00000000000..99e42d04d09 --- /dev/null +++ b/docs/changelogs/v22.7.4.16-stable.md @@ -0,0 +1,23 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.7.4.16-stable (0b9272f8fdc) FIXME as compared to v22.7.3.5-stable (e140b8b5f3a) + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#40163](https://github.com/ClickHouse/ClickHouse/issues/40163): Fix HashMethodOneNumber getting a wrong key value when the column is const. [#40020](https://github.com/ClickHouse/ClickHouse/pull/40020) ([Duc Canh Le](https://github.com/canhld94)).
+* Backported in [#40125](https://github.com/ClickHouse/ClickHouse/issues/40125): Fix bug in collectFilesToSkip() by adding the correct file extension (.idx or .idx2) for indexes to be recalculated, avoiding wrong hard links. Fixed [#39896](https://github.com/ClickHouse/ClickHouse/issues/39896). [#40095](https://github.com/ClickHouse/ClickHouse/pull/40095) ([Jianmei Zhang](https://github.com/zhangjmruc)). +* Backported in [#40291](https://github.com/ClickHouse/ClickHouse/issues/40291): A segmentation fault that has CaresPTRResolver::resolve in the stack trace has been reported. [#40134](https://github.com/ClickHouse/ClickHouse/pull/40134) ([Arthur Passos](https://github.com/arthurpassos)). +* Backported in [#40210](https://github.com/ClickHouse/ClickHouse/issues/40210): Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#40284](https://github.com/ClickHouse/ClickHouse/issues/40284): Fix possible segfault in CapnProto input format. This bug was found and sent through the ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Fix a bug with symlinks detection [#40232](https://github.com/ClickHouse/ClickHouse/pull/40232) ([Alexander Tokmakov](https://github.com/tavplubix)). +* fix heap buffer overflow by limiting http chunk size [#40292](https://github.com/ClickHouse/ClickHouse/pull/40292) ([Sema Checherinda](https://github.com/CheSema)). +* Reduce changelog verbosity in CI [#40360](https://github.com/ClickHouse/ClickHouse/pull/40360) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + diff --git a/docs/changelogs/v22.8.1.2097-lts.md b/docs/changelogs/v22.8.1.2097-lts.md new file mode 100644 index 00000000000..5ef2b5c00cc --- /dev/null +++ b/docs/changelogs/v22.8.1.2097-lts.md @@ -0,0 +1,374 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.8.1.2097-lts (09a2ff88435) FIXME as compared to v22.7.1.2484-stable (f4f05ec786a) + +#### Backward Incompatible Change +* Make cache composable, allow not to evict certain files (regarding idx, mrk, ..), delete old cache version. Now it is possible to configure cache over Azure blob storage disk, over Local disk, over StaticWeb disk, etc. This PR is marked backward incompatible because the cache configuration changes, and the config file needs to be updated for the cache to work. The old cache will still be used with the new configuration. The server will start up fine with the old cache configuration. Closes [#36140](https://github.com/ClickHouse/ClickHouse/issues/36140). Closes [#37889](https://github.com/ClickHouse/ClickHouse/issues/37889). [#36171](https://github.com/ClickHouse/ClickHouse/pull/36171) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Now, all relevant dictionary sources respect `remote_url_allow_hosts` setting. It was already done for HTTP, Cassandra, Redis. Added ClickHouse, MongoDB, MySQL, PostgreSQL. Host is checked only for dictionaries created from DDL. [#39184](https://github.com/ClickHouse/ClickHouse/pull/39184) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Extended range of Date32 and DateTime64 to support dates from the year 1900 to 2299.
In previous versions, the supported interval was only from the year 1925 to 2283. The implementation is using the proleptic Gregorian calendar (which is conformant with [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601):2004 (clause 3.2.1 The Gregorian calendar)) instead of accounting for historical transitions from the Julian to the Gregorian calendar. This change affects implementation-specific behavior for out-of-range arguments. E.g. if in previous versions the value of `1899-01-01` was clamped to `1925-01-01`, in the new version it will be clamped to `1900-01-01`. It changes the behavior of rounding with `toStartOfInterval` if you pass `INTERVAL 3 QUARTER` up to one quarter because the intervals are counted from an implementation-specific point of time. Closes [#28216](https://github.com/ClickHouse/ClickHouse/issues/28216), improves [#38393](https://github.com/ClickHouse/ClickHouse/issues/38393). [#39425](https://github.com/ClickHouse/ClickHouse/pull/39425) ([Roman Vasin](https://github.com/rvasin)). + +#### New Feature +* Added a setting `exact_rows_before_limit` (0/1). When enabled, ClickHouse will provide exact value for `rows_before_limit_at_least` statistic, but with the cost that the data before limit will have to be read completely. This closes [#6613](https://github.com/ClickHouse/ClickHouse/issues/6613). [#25333](https://github.com/ClickHouse/ClickHouse/pull/25333) ([kevin wan](https://github.com/MaxWk)). +* Add SLRU cache policy for uncompressed cache and marks cache. [#34651](https://github.com/ClickHouse/ClickHouse/pull/34651) ([alexX512](https://github.com/alexX512)). +* Intel® In-Memory Analytics Accelerator (Intel® IAA) is a hardware accelerator available in the upcoming generation of Intel® Xeon® Scalable processors ("Sapphire Rapids"). Its goal is to speed up common operations in analytics like data (de)compression and filtering. ClickHouse gained the new "DeflateQpl" compression codec which utilizes the Intel® IAA offloading technology to provide a high-performance DEFLATE implementation. The codec uses the [Intel® Query Processing Library (QPL)](https://github.com/intel/qpl) which abstracts access to the hardware accelerator, respectively to a software fallback in case the hardware accelerator is not available. DEFLATE provides in general higher compression rates than ClickHouse's LZ4 default codec, and as a result, offers less disk I/O and lower main memory consumption. [#36654](https://github.com/ClickHouse/ClickHouse/pull/36654) ([jasperzhu](https://github.com/jinjunzh)). +* Add concurrent_threads_soft_limit parameter to increase performance in case of high RPS by means of limiting total number of threads for all queries. [#37285](https://github.com/ClickHouse/ClickHouse/pull/37285) ([Roman Vasin](https://github.com/rvasin)). +* Added concurrency control logic to limit total number of concurrent threads created by queries. [#37558](https://github.com/ClickHouse/ClickHouse/pull/37558) ([Sergei Trifonov](https://github.com/serxa)). +* Added support for parallel distributed insert select into tables with Distributed and Replicated engine [#34670](https://github.com/ClickHouse/ClickHouse/issues/34670). [#39107](https://github.com/ClickHouse/ClickHouse/pull/39107) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add new settings to control schema inference from text formats: - `input_format_try_infer_dates` - try infer dates from strings. - `input_format_try_infer_datetimes` - try infer datetimes from strings. 
- `input_format_try_infer_integers` - try infer `Int64` instead of `Float64`. - `input_format_json_try_infer_numbers_from_strings` - try infer numbers from json strings in JSON formats. [#39186](https://github.com/ClickHouse/ClickHouse/pull/39186) ([Kruglov Pavel](https://github.com/Avogar)). +* This feature will provide JSON formatted log output in the console. The purpose is to allow easier ingestion and query in log analysis tools. [#39277](https://github.com/ClickHouse/ClickHouse/pull/39277) ([Mallik Hassan](https://github.com/SadiHassan)). +* Intel® In-Memory Analytics Accelerator (Intel® IAA) is a hardware accelerator available in the upcoming generation of Intel® Xeon® Scalable processors ("Sapphire Rapids"). Its goal is to speed up common operations in analytics like data (de)compression and filtering. ClickHouse gained the new "DeflateQpl" compression codec which utilizes the Intel® IAA offloading technology to provide a high-performance DEFLATE implementation. The codec uses the [Intel® Query Processing Library (QPL)](https://github.com/intel/qpl) which abstracts access to the hardware accelerator, respectively to a software fallback in case the hardware accelerator is not available. DEFLATE provides in general higher compression rates than ClickHouse's LZ4 default codec, and as a result, offers less disk I/O and lower main memory consumption. [#39494](https://github.com/ClickHouse/ClickHouse/pull/39494) ([Robert Schulze](https://github.com/rschu1ze)). +* Add function `nowInBlock` which allows getting the current time during long-running and continuous queries. Closes [#39522](https://github.com/ClickHouse/ClickHouse/issues/39522). Note: there are no functions `now64InBlock` nor `todayInBlock`. [#39533](https://github.com/ClickHouse/ClickHouse/pull/39533) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add result_rows and result_bytes to progress reports (`X-ClickHouse-Summary`). [#39567](https://github.com/ClickHouse/ClickHouse/pull/39567) ([Raúl Marín](https://github.com/Algunenano)). +* Adds the ability to specify settings for an `executable()` table function. [#39681](https://github.com/ClickHouse/ClickHouse/pull/39681) ([Constantine Peresypkin](https://github.com/pkit)). +* Implemented automatic conversion of database engine from `Ordinary` to `Atomic`. Create empty `convert_ordinary_to_atomic` file in `flags` directory and all `Ordinary` databases will be converted automatically on next server start. Resolves [#39546](https://github.com/ClickHouse/ClickHouse/issues/39546). [#39933](https://github.com/ClickHouse/ClickHouse/pull/39933) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add new setting `schema_inference_hints` that allows to specify structure hints in schema inference for specific columns. Closes [#39569](https://github.com/ClickHouse/ClickHouse/issues/39569). [#40068](https://github.com/ClickHouse/ClickHouse/pull/40068) ([Kruglov Pavel](https://github.com/Avogar)). + +#### Performance Improvement +* Break on analyze stuck on complex query. [#38185](https://github.com/ClickHouse/ClickHouse/pull/38185) ([Vladimir C](https://github.com/vdimir)). +* Deduce way to sort based on input stream sort description. Skip sorting if input stream is already sorted. [#38719](https://github.com/ClickHouse/ClickHouse/pull/38719) ([Igor Nikonov](https://github.com/devcrafter)). +* `DISTINCT` in order with `ORDER BY` improves memory usage (significantly) and query execution time if `DISTINCT` columns match (or form a prefix of) `ORDER BY` columns.
[#39432](https://github.com/ClickHouse/ClickHouse/pull/39432) ([Igor Nikonov](https://github.com/devcrafter)). +* Use local node as first priority to get structure of remote table when executing `cluster` and similar table functions. [#39440](https://github.com/ClickHouse/ClickHouse/pull/39440) ([Mingliang Pan](https://github.com/liangliangpan)). +* Use `DistinctSortedTransform` only when sort description is applicable to DISTINCT columns, otherwise fall back to ordinary DISTINCT implementation. It allows making fewer checks during `DistinctSortedTransform` execution. [#39528](https://github.com/ClickHouse/ClickHouse/pull/39528) ([Igor Nikonov](https://github.com/devcrafter)). +* `DistinctSortedTransform` didn't take advantage of sorting, i.e. it worked like ordinary `DISTINCT` implementation. The fix reduces memory usage significantly. [#39538](https://github.com/ClickHouse/ClickHouse/pull/39538) ([Igor Nikonov](https://github.com/devcrafter)). +* ColumnVector: optimize filter with AVX512VBMI2 compress store. [#39633](https://github.com/ClickHouse/ClickHouse/pull/39633) ([Guo Wangyang](https://github.com/guowangy)). +* KeyCondition: optimize applyFunction in multi-thread scenario. [#39812](https://github.com/ClickHouse/ClickHouse/pull/39812) ([Guo Wangyang](https://github.com/guowangy)). +* For systems with AVX512 VBMI2, this PR improves performance by ca. 6% for SSB benchmark queries 3.1, 3.2 and 3.3 (SF=100). Tested on Intel Icelake Xeon 8380 * 2 socket. [#40033](https://github.com/ClickHouse/ClickHouse/pull/40033) ([Robert Schulze](https://github.com/rschu1ze)). +* Don't visit the AST for UDFs if none are registered. [#40069](https://github.com/ClickHouse/ClickHouse/pull/40069) ([Raúl Marín](https://github.com/Algunenano)). +* Optimize CurrentMemoryTracker alloc and free. [#40078](https://github.com/ClickHouse/ClickHouse/pull/40078) ([Raúl Marín](https://github.com/Algunenano)). + +#### Improvement
+* Change the way PK is analyzed for MergeTree. [#25563](https://github.com/ClickHouse/ClickHouse/pull/25563) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Improved structure of DDL query result table for `Replicated` database (separate columns with shard and replica name, clearer status) - `CREATE TABLE ... ON CLUSTER` queries can be normalized on initiator first if `distributed_ddl_entry_format_version` is set to 3 (default value). It means that `ON CLUSTER` queries may not work if the initiator does not belong to the cluster specified in the query. Fixes [#37318](https://github.com/ClickHouse/ClickHouse/issues/37318), [#39500](https://github.com/ClickHouse/ClickHouse/issues/39500) - Ignore `ON CLUSTER` clause if database is `Replicated` and the cluster name equals the database name. Related to [#35570](https://github.com/ClickHouse/ClickHouse/issues/35570) - Miscellaneous minor fixes for `Replicated` database engine - Check metadata consistency when starting up `Replicated` database, start replica recovery in case of mismatch of local metadata and metadata in Keeper. Resolves [#24880](https://github.com/ClickHouse/ClickHouse/issues/24880). [#37198](https://github.com/ClickHouse/ClickHouse/pull/37198) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Support SQL standard DELETE FROM syntax on merge tree tables and lightweight delete implementation for merge tree families. [#37893](https://github.com/ClickHouse/ClickHouse/pull/37893) ([Jianmei Zhang](https://github.com/zhangjmruc)).
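+  As a rough illustration of the lightweight `DELETE` entry above (the table name and filter are hypothetical, and in this release the feature may require enabling the experimental setting first):
+  ```sql
+  SET allow_experimental_lightweight_delete = 1;
+  -- Matching rows are masked immediately and filtered out of subsequent SELECTs;
+  -- the physical cleanup happens later in background merges.
+  DELETE FROM hits WHERE CounterID = 42;
+  ```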
+* `timeSlots` now works with DateTime64; subsecond duration and slot size are available when working with DateTime64. [#37951](https://github.com/ClickHouse/ClickHouse/pull/37951) ([Andrey Zvonov](https://github.com/zvonand)). +* Add cache for schema inference for file/s3/hdfs/url table functions. Now, schema inference will be performed only on the first query to the file; all subsequent queries to the same file will use the schema from cache if data wasn't changed. Add system table `system.schema_inference_cache` with all current schemas in cache and system queries `SYSTEM DROP SCHEMA CACHE [FOR FILE/S3/HDFS/URL]` to drop schemas from cache. [#38286](https://github.com/ClickHouse/ClickHouse/pull/38286) ([Kruglov Pavel](https://github.com/Avogar)). +* Simplified function registration macro interface (`FUNCTION_REGISTER*`) to eliminate the step to add and call an extern function in the registerFunctions.cpp; it also makes incremental builds of a new function faster. [#38615](https://github.com/ClickHouse/ClickHouse/pull/38615) ([Li Yin](https://github.com/liyinsg)). +* Added support of `LEFT SEMI` and `LEFT ANTI` direct join with rocksdb. [#38956](https://github.com/ClickHouse/ClickHouse/pull/38956) ([Vladimir C](https://github.com/vdimir)). +* Resolves [#37490](https://github.com/ClickHouse/ClickHouse/issues/37490). [#39054](https://github.com/ClickHouse/ClickHouse/pull/39054) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). +* Store Keeper API version inside a predefined path. [#39096](https://github.com/ClickHouse/ClickHouse/pull/39096) ([Antonio Andelic](https://github.com/antonio2368)). +* Now entrypoint.sh in docker image creates and executes chown for all folders it finds in the config for multidisk setup [#17717](https://github.com/ClickHouse/ClickHouse/issues/17717). [#39121](https://github.com/ClickHouse/ClickHouse/pull/39121) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add profile events for fsync. [#39179](https://github.com/ClickHouse/ClickHouse/pull/39179) ([Azat Khuzhin](https://github.com/azat)). +* Add the second argument to the ordinary function `file(path[, default])`, which the function returns when the file does not exist. [#39218](https://github.com/ClickHouse/ClickHouse/pull/39218) ([Nikolay Degterinsky](https://github.com/evillique)). +* Some small fixes for reading via http, allow retrying partial content if a 200 OK was received. [#39244](https://github.com/ClickHouse/ClickHouse/pull/39244) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Improved Base58 encoding/decoding. [#39292](https://github.com/ClickHouse/ClickHouse/pull/39292) ([Andrey Zvonov](https://github.com/zvonand)). +* Normalize `AggregateFunction` types and state representations because optimizations like https://github.com/ClickHouse/ClickHouse/pull/35788 will treat `count(not null columns)` as `count()`, which might confuse distributed interpreters with the following error: `Conversion from AggregateFunction(count) to AggregateFunction(count, Int64) is not supported`. [#39420](https://github.com/ClickHouse/ClickHouse/pull/39420) ([Amos Bird](https://github.com/amosbird)). +* Improved memory usage during memory efficient merging of aggregation results. [#39429](https://github.com/ClickHouse/ClickHouse/pull/39429) ([Nikita Taranov](https://github.com/nickitat)). +* Support queries `CREATE TEMPORARY TABLE ... () AS ...`. [#39462](https://github.com/ClickHouse/ClickHouse/pull/39462) ([Kruglov Pavel](https://github.com/Avogar)).
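+  A minimal sketch of the `CREATE TEMPORARY TABLE ... AS ...` form from the entry above (table and column names are made up):
+  ```sql
+  -- The column list may be omitted; the structure is taken from the SELECT.
+  CREATE TEMPORARY TABLE top_numbers AS SELECT number AS n FROM system.numbers LIMIT 10;
+  SELECT count() FROM top_numbers;
+  ```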
+* Add support of `!`/`*` (exclamation/asterisk) in custom TLDs (`cutToFirstSignificantSubdomainCustom()`/`cutToFirstSignificantSubdomainCustomWithWWW()`/`firstSignificantSubdomainCustom()`). [#39496](https://github.com/ClickHouse/ClickHouse/pull/39496) ([Azat Khuzhin](https://github.com/azat)). +* Rework and simplify the `system.backups` table, remove the `internal` column, allow the user to set the ID of the operation, add columns `num_files`, `uncompressed_size`, `compressed_size`, `start_time`, `end_time`. [#39503](https://github.com/ClickHouse/ClickHouse/pull/39503) ([Vitaly Baranov](https://github.com/vitlibar)). +* Refactored a little code, removed duplicate code. [#39509](https://github.com/ClickHouse/ClickHouse/pull/39509) ([Simon Liu](https://github.com/monadbobo)). +* Add support for TLS connections to NATS. Implements [#39525](https://github.com/ClickHouse/ClickHouse/issues/39525). [#39527](https://github.com/ClickHouse/ClickHouse/pull/39527) ([Constantine Peresypkin](https://github.com/pkit)). +* `clickhouse-obfuscator` (a tool for database obfuscation for testing and load generation) now has the new `--save` and `--load` parameters to work with pre-trained models. This closes [#39534](https://github.com/ClickHouse/ClickHouse/issues/39534). [#39541](https://github.com/ClickHouse/ClickHouse/pull/39541) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix incorrect behavior of log rotation during restart. [#39558](https://github.com/ClickHouse/ClickHouse/pull/39558) ([Nikolay Degterinsky](https://github.com/evillique)). +* Improve bytes to bits mask transform for SSE/AVX/AVX512. [#39586](https://github.com/ClickHouse/ClickHouse/pull/39586) ([Guo Wangyang](https://github.com/guowangy)). +* Add formats `PrettyMonoBlock`, `PrettyNoEscapesMonoBlock`, `PrettyCompactNoEscapes`, `PrettyCompactNoEscapesMonoBlock`, `PrettySpaceNoEscapes`, `PrettySpaceMonoBlock`, `PrettySpaceNoEscapesMonoBlock`. [#39646](https://github.com/ClickHouse/ClickHouse/pull/39646) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix building aggregate projections when external aggregation is on. Mark as improvement because the case is rare and there exists an easy workaround to fix it via changing settings. This fixes [#39667](https://github.com/ClickHouse/ClickHouse/issues/39667). [#39671](https://github.com/ClickHouse/ClickHouse/pull/39671) ([Amos Bird](https://github.com/amosbird)). +* Allow to execute hash functions with arguments of type `Map` (a short example follows below). [#39685](https://github.com/ClickHouse/ClickHouse/pull/39685) ([Anton Popov](https://github.com/CurtizJ)). +* Add a configuration parameter to hide addresses in stack traces. It may improve security a little but generally, it is harmful and should not be used. [#39690](https://github.com/ClickHouse/ClickHouse/pull/39690) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Change the prefix size of AggregateFunctionDistinct to make sure the nested function data is memory-aligned. [#39696](https://github.com/ClickHouse/ClickHouse/pull/39696) ([Pxl](https://github.com/BiteTheDDDDt)). +* Properly escape credentials passed to the `clickhouse-diagnostic` tool. [#39707](https://github.com/ClickHouse/ClickHouse/pull/39707) ([Dale McDiarmid](https://github.com/gingerwizard)). +* keeper-improvement: create a snapshot on exit. It can be controlled with the config `keeper_server.create_snapshot_on_exit`, `true` by default. [#39755](https://github.com/ClickHouse/ClickHouse/pull/39755) ([Antonio Andelic](https://github.com/antonio2368)).
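+  The short example promised in the `Map` hashing entry above (values are arbitrary, and it assumes `cityHash64` is among the covered hash functions):
+  ```sql
+  -- Hash functions can now take Map-typed arguments directly.
+  SELECT cityHash64(map('a', 1, 'b', 2));
+  ```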
+* Support primary key analysis for `row_policy_filter` and `additional_filter`. It also helps fix issues like [#37454](https://github.com/ClickHouse/ClickHouse/issues/37454). [#39826](https://github.com/ClickHouse/ClickHouse/pull/39826) ([Amos Bird](https://github.com/amosbird)). +* Parameters are now transferred in `Query` packets right after the query text in the same serialisation format as the settings. [#39906](https://github.com/ClickHouse/ClickHouse/pull/39906) ([Nikita Taranov](https://github.com/nickitat)). +* Fix two usability issues in Play UI: - it was non-pixel-perfect on iPad due to parasitic border radius and margins; - the progress indication did not display after the first query. This closes [#39957](https://github.com/ClickHouse/ClickHouse/issues/39957). This closes [#39960](https://github.com/ClickHouse/ClickHouse/issues/39960). [#39961](https://github.com/ClickHouse/ClickHouse/pull/39961) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Play UI: add row numbers; add cell selection on click; add hysteresis for table cells. [#39962](https://github.com/ClickHouse/ClickHouse/pull/39962) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* The client will show server-side elapsed time. This is important for the performance comparison of ClickHouse services in remote datacenters. This closes [#38070](https://github.com/ClickHouse/ClickHouse/issues/38070). See also [this](https://github.com/ClickHouse/ClickBench/blob/main/hardware/benchmark-cloud.sh#L37) for motivation. [#39968](https://github.com/ClickHouse/ClickHouse/pull/39968) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Adds `parseDateTime64BestEffortUS`, `parseDateTime64BestEffortUSOrNull`, `parseDateTime64BestEffortUSOrZero` functions, closing [#37492](https://github.com/ClickHouse/ClickHouse/issues/37492). [#40015](https://github.com/ClickHouse/ClickHouse/pull/40015) ([Tanya Bragin](https://github.com/tbragin)). +* Add observer mode to (zoo)keeper cluster discovery feature. In this mode the node itself doesn't belong to the cluster. [#40035](https://github.com/ClickHouse/ClickHouse/pull/40035) ([Vladimir C](https://github.com/vdimir)). +* Play UI: recognize tab key in textarea, but at the same time don't mess up with tab navigation. [#40053](https://github.com/ClickHouse/ClickHouse/pull/40053) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Extend processors_profile_log with more information such as input rows. [#40121](https://github.com/ClickHouse/ClickHouse/pull/40121) ([Amos Bird](https://github.com/amosbird)). +* Update tzdata to 2022b to support the new timezone changes. See https://github.com/google/cctz/pull/226. Chile's 2022 DST start is delayed from September 4 to September 11. Iran plans to stop observing DST permanently, after it falls back on 2022-09-21. There are corrections of the historical time zone of Asia/Tehran in the year 1977: Iran adopted standard time in 1935, not 1946. In 1977 it observed DST from 03-21 23:00 to 10-20 24:00; its 1978 transitions were on 03-24 and 08-05, not 03-20 and 10-20; and its spring 1979 transition was on 05-27, not 03-21 (https://data.iana.org/time-zones/tzdb/NEWS). [#40184](https://github.com/ClickHouse/ClickHouse/pull/40184) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Display server-side time in `clickhouse-benchmark` by default if it is available (since ClickHouse version 22.8). This is needed to correctly compare the performance of clouds.
This behavior can be changed with the new `--client-side-time` command line option. Change the `--randomize` command line option from `--randomize 1` to the form without argument. [#40193](https://github.com/ClickHouse/ClickHouse/pull/40193) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add counters (ProfileEvents) for cases when query complexity limitation has been set and reached (a separate counter for `overflow_mode` = `break` and `throw`). For example, if you have set up `max_rows_to_read` with `read_overflow_mode = 'break'`, looking at the value of the `OverflowBreak` counter will allow distinguishing incomplete results. [#40205](https://github.com/ClickHouse/ClickHouse/pull/40205) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix memory accounting in case of MEMORY_LIMIT_EXCEEDED errors (previously [peak] memory usage took failed allocations into account). [#40249](https://github.com/ClickHouse/ClickHouse/pull/40249) ([Azat Khuzhin](https://github.com/azat)). +* Add current metrics for fs cache: `FilesystemCacheSize` and `FilesystemCacheElements`. [#40260](https://github.com/ClickHouse/ClickHouse/pull/40260) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add support for LARGE_BINARY/LARGE_STRING with Arrow (Closes [#32401](https://github.com/ClickHouse/ClickHouse/issues/32401)). [#40293](https://github.com/ClickHouse/ClickHouse/pull/40293) ([Josh Taylor](https://github.com/joshuataylor)). + +#### Bug Fix +* Support hadoop secure rpc transfer (hadoop.rpc.protection=privacy and hadoop.rpc.protection=integrity). [#39411](https://github.com/ClickHouse/ClickHouse/pull/39411) ([michael1589](https://github.com/michael1589)). +* Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix duplicate columns in join plan. Finally, solve [#26809](https://github.com/ClickHouse/ClickHouse/issues/26809). [#40009](https://github.com/ClickHouse/ClickHouse/pull/40009) ([Vladimir C](https://github.com/vdimir)). + +#### Build/Testing/Packaging Improvement +* Prebuilt ClickHouse x86 binaries now require support for AVX instructions, i.e. a CPU not older than Intel Sandy Bridge / AMD Bulldozer, both released in 2011. [#39000](https://github.com/ClickHouse/ClickHouse/pull/39000) ([Robert Schulze](https://github.com/rschu1ze)). +* Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix LSan by fixing getauxval(). [#39430](https://github.com/ClickHouse/ClickHouse/pull/39430) ([Azat Khuzhin](https://github.com/azat)). +* TSAN has issues with clang-14 (https://github.com/google/sanitizers/issues/1552, https://github.com/google/sanitizers/issues/1540), so here we temporarily build the TSAN binaries with clang-13. [#39450](https://github.com/ClickHouse/ClickHouse/pull/39450) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Remove the option to build ClickHouse tools as separate executable programs. This fixes [#37847](https://github.com/ClickHouse/ClickHouse/issues/37847). [#39520](https://github.com/ClickHouse/ClickHouse/pull/39520) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
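+  A sketch of how the new overflow counters from the Improvement section above could be inspected (the table name is hypothetical):
+  ```sql
+  SET max_rows_to_read = 1000, read_overflow_mode = 'break';
+  SELECT count() FROM hits;
+  -- A non-zero OverflowBreak value indicates that a result may have been truncated.
+  SELECT value FROM system.events WHERE event = 'OverflowBreak';
+  ```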
+* Fixed Unit tests for wide integers on s390x. [#39627](https://github.com/ClickHouse/ClickHouse/pull/39627) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Increase max cache size for clang-tidy builds. Try to avoid flushing it out between builds. [#39652](https://github.com/ClickHouse/ClickHouse/pull/39652) ([Nikita Taranov](https://github.com/nickitat)). +* No need to use a fixed IP when you are using a cluster with SSL. Using the same fixed IP could trigger collisions between tests. With this change, the server's certificate is generated for a designated host name (see server-ext.cnf in each test). The client should check the server's certificate against that name accordingly. [#40007](https://github.com/ClickHouse/ClickHouse/pull/40007) ([Sema Checherinda](https://github.com/CheSema)). +* Support build with `clang-16` (trunk). This closes [#39949](https://github.com/ClickHouse/ClickHouse/issues/39949). [#40181](https://github.com/ClickHouse/ClickHouse/pull/40181) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Prepare RISC-V 64 build to run in CI. This is for [#40141](https://github.com/ClickHouse/ClickHouse/issues/40141). [#40197](https://github.com/ClickHouse/ClickHouse/pull/40197) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Fixed query hanging for SELECT with ORDER BY WITH FILL with different date/time types. [#37849](https://github.com/ClickHouse/ClickHouse/pull/37849) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fix ORDER BY that matches projections ORDER BY (before it simply returned an unsorted result). [#38725](https://github.com/ClickHouse/ClickHouse/pull/38725) ([Azat Khuzhin](https://github.com/azat)). +* Do not optimise functions in GROUP BY statements if they shadow one of the table columns or expressions. Fixes [#37032](https://github.com/ClickHouse/ClickHouse/issues/37032). [#39103](https://github.com/ClickHouse/ClickHouse/pull/39103) ([Anton Kozlov](https://github.com/tonickkozlov)). +* Fix wrong table name in logs after RENAME TABLE. This fixes [#38018](https://github.com/ClickHouse/ClickHouse/issues/38018). [#39227](https://github.com/ClickHouse/ClickHouse/pull/39227) ([Amos Bird](https://github.com/amosbird)). +* Fix positional arguments in case of columns pruning when optimising the query. Closes [#38433](https://github.com/ClickHouse/ClickHouse/issues/38433). [#39293](https://github.com/ClickHouse/ClickHouse/pull/39293) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix bug in schema inference in case of empty messages in Protobuf/CapnProto formats that allowed creating a column with an empty `Tuple` type. Closes [#39051](https://github.com/ClickHouse/ClickHouse/issues/39051). Add 2 new settings `input_format_{protobuf/capnproto}_skip_fields_with_unsupported_types_in_schema_inference` that allow skipping fields with unsupported types during schema inference for Protobuf and CapnProto formats. [#39357](https://github.com/ClickHouse/ClickHouse/pull/39357) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix segmentation fault on `CREATE WINDOW VIEW .. ON CLUSTER ... INNER`. Closes [#39363](https://github.com/ClickHouse/ClickHouse/issues/39363). [#39384](https://github.com/ClickHouse/ClickHouse/pull/39384) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix WriteBuffer finalize when cancelling an insert into a function. Proper version of https://github.com/ClickHouse/ClickHouse/pull/39396 that was reverted.
[#39458](https://github.com/ClickHouse/ClickHouse/pull/39458) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix storing of columns of type `Object` in sparse serialization. [#39464](https://github.com/ClickHouse/ClickHouse/pull/39464) ([Anton Popov](https://github.com/CurtizJ)). +* Fix possible "Not found column in block" exception when using projections. This closes [#39469](https://github.com/ClickHouse/ClickHouse/issues/39469). [#39470](https://github.com/ClickHouse/ClickHouse/pull/39470) ([小路](https://github.com/nicelulu)). +* Fix LOGICAL_ERROR on race between DROP and INSERT with materialized views. [#39477](https://github.com/ClickHouse/ClickHouse/pull/39477) ([Azat Khuzhin](https://github.com/azat)). +* Fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094). Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix rare bug in asynchronous reading (with setting `local_filesystem_read_method='pread_threadpool'`) with enabled `O_DIRECT` (enabled by setting `min_bytes_to_use_direct_io`). [#39506](https://github.com/ClickHouse/ClickHouse/pull/39506) ([Anton Popov](https://github.com/CurtizJ)). +* Fixes "Code: 49. DB::Exception: FunctionFactory: the function name '' is not unique. (LOGICAL_ERROR)" observed on FreeBSD when starting clickhouse. [#39551](https://github.com/ClickHouse/ClickHouse/pull/39551) ([Alexander Gololobov](https://github.com/davenger)). +* Fix bug with maxsplit argument for splitByChar, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)). +* Fix bug in ASOF JOIN with `enable_optimize_predicate_expression`, close [#37813](https://github.com/ClickHouse/ClickHouse/issues/37813). [#39556](https://github.com/ClickHouse/ClickHouse/pull/39556) ([Vladimir C](https://github.com/vdimir)). +* Fixed `CREATE/DROP INDEX` query with `ON CLUSTER` or `Replicated` database and `ReplicatedMergeTree`. It used to be executed on all replicas (causing errors or a stuck DDL queue). Fixes [#39511](https://github.com/ClickHouse/ClickHouse/issues/39511). [#39565](https://github.com/ClickHouse/ClickHouse/pull/39565) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix "column not found" error for push down with join, close [#39505](https://github.com/ClickHouse/ClickHouse/issues/39505). [#39575](https://github.com/ClickHouse/ClickHouse/pull/39575) ([Vladimir C](https://github.com/vdimir)). +* Fix the wrong `REGEXP_REPLACE` alias. This fixes https://github.com/ClickHouse/ClickBench/issues/9. [#39592](https://github.com/ClickHouse/ClickHouse/pull/39592) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fixed point of origin for exponential decay window functions to the last value in window. Previously, decay was calculated by formula `exp((t - curr_row_t) / decay_length)`, which is incorrect when the right boundary of the window is not `CURRENT ROW`. It was changed to: `exp((t - last_row_t) / decay_length)`. There is no change in results for windows with `ROWS BETWEEN (smth) AND CURRENT ROW`. [#39593](https://github.com/ClickHouse/ClickHouse/pull/39593) ([Vladimir Chebotaryov](https://github.com/quickhouse)). +* Fix Decimal division overflow, which can be detected based on operands scale.
[#39600](https://github.com/ClickHouse/ClickHouse/pull/39600) ([Andrey Zvonov](https://github.com/zvonand)). +* Fix settings `output_format_arrow_string_as_string` and `output_format_arrow_low_cardinality_as_dictionary` to work in combination. Closes [#39624](https://github.com/ClickHouse/ClickHouse/issues/39624). [#39647](https://github.com/ClickHouse/ClickHouse/pull/39647) ([Kruglov Pavel](https://github.com/Avogar)). +* Fixed a bug in default database resolution in distributed table reads. [#39674](https://github.com/ClickHouse/ClickHouse/pull/39674) ([Anton Kozlov](https://github.com/tonickkozlov)). +* Select might read data of dropped table if cache for mmap IO is used and database engine is Ordinary and a new table was created with the same name as the dropped one had. It's fixed. [#39708](https://github.com/ClickHouse/ClickHouse/pull/39708) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix possible error `Invalid column type for ColumnUnique::insertRangeFrom. Expected String, got ColumnLowCardinality`. Fixes [#38460](https://github.com/ClickHouse/ClickHouse/issues/38460). [#39716](https://github.com/ClickHouse/ClickHouse/pull/39716) ([Arthur Passos](https://github.com/arthurpassos)). +* Field names in the `meta` section of JSON format were erroneously double escaped. This closes [#39693](https://github.com/ClickHouse/ClickHouse/issues/39693). [#39747](https://github.com/ClickHouse/ClickHouse/pull/39747) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix wrong index analysis with tuples and operator `IN`, which could lead to a wrong query result. [#39752](https://github.com/ClickHouse/ClickHouse/pull/39752) ([Anton Popov](https://github.com/CurtizJ)). +* Fix EmbeddedRocksDB filtering by key using params. [#39757](https://github.com/ClickHouse/ClickHouse/pull/39757) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix error `Invalid number of columns in chunk pushed to OutputPort` which was caused by ARRAY JOIN optimization. Fixes [#39164](https://github.com/ClickHouse/ClickHouse/issues/39164). [#39799](https://github.com/ClickHouse/ClickHouse/pull/39799) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)). +* Fix quota_key application on connect. [#39874](https://github.com/ClickHouse/ClickHouse/pull/39874) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* We encountered query exceptions: DB::Exception: Cannot open file /media/ssd1/fordata/clickhouse/data/data/perf/perf_log_local_v3_1/20220618_17233_17238_1/namespace.dict.bin, errno: 24, strerror: Too many open files. [#39886](https://github.com/ClickHouse/ClickHouse/pull/39886) ([Fangyuan Deng](https://github.com/pzhdfy)). +* Fix broken NFS mkdir for root-squashed volumes. [#39898](https://github.com/ClickHouse/ClickHouse/pull/39898) ([Constantine Peresypkin](https://github.com/pkit)). +* Remove dictionaries from prometheus metrics on DETACH/DROP. [#39926](https://github.com/ClickHouse/ClickHouse/pull/39926) ([Azat Khuzhin](https://github.com/azat)). +* Fix read of StorageFile with virtual columns. Closes [#39907](https://github.com/ClickHouse/ClickHouse/issues/39907).
[#39943](https://github.com/ClickHouse/ClickHouse/pull/39943) ([flynn](https://github.com/ucasfl)). +* Fix big memory usage during fetches. Fixes [#39915](https://github.com/ClickHouse/ClickHouse/issues/39915). [#39990](https://github.com/ClickHouse/ClickHouse/pull/39990) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix hashId crash and salt parameter not being used. [#40002](https://github.com/ClickHouse/ClickHouse/pull/40002) ([Raúl Marín](https://github.com/Algunenano)). +* Fix HashMethodOneNumber getting a wrong key value when the column is const. [#40020](https://github.com/ClickHouse/ClickHouse/pull/40020) ([Duc Canh Le](https://github.com/canhld94)). +* Fixed "Part directory doesn't exist" and "`tmp_` ... No such file or directory" errors during too slow INSERT or too long merge/mutation. Also fixed issue that may cause some replication queue entries to get stuck without any errors or warnings in logs if previous attempt to fetch part failed, but `tmp-fetch_` directory was not cleaned up. [#40031](https://github.com/ClickHouse/ClickHouse/pull/40031) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix rare cases of parsing of arrays of tuples in format `Values`. [#40034](https://github.com/ClickHouse/ClickHouse/pull/40034) ([Anton Popov](https://github.com/CurtizJ)). +* Fixes ArrowColumn format Dictionary(X) & Dictionary(Nullable(X)) conversion to ClickHouse LowCardinality(X) & LowCardinality(Nullable(X)) respectively. [#40037](https://github.com/ClickHouse/ClickHouse/pull/40037) ([Arthur Passos](https://github.com/arthurpassos)). +* Fix potential deadlock in WriteBufferFromS3 during task scheduling failure. [#40070](https://github.com/ClickHouse/ClickHouse/pull/40070) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix bug in collectFilesToSkip() by adding the correct file extension (.idx or .idx2) for indexes to be recalculated, avoiding wrong hard links. Fixed [#39896](https://github.com/ClickHouse/ClickHouse/issues/39896). [#40095](https://github.com/ClickHouse/ClickHouse/pull/40095) ([Jianmei Zhang](https://github.com/zhangjmruc)). +* A segmentation fault that has CaresPTRResolver::resolve in the stack trace has been reported. [#40134](https://github.com/ClickHouse/ClickHouse/pull/40134) ([Arthur Passos](https://github.com/arthurpassos)). +* Fix a very rare case of incorrect behavior of array subscript operator. This closes [#28720](https://github.com/ClickHouse/ClickHouse/issues/28720). [#40185](https://github.com/ClickHouse/ClickHouse/pull/40185) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix unexpected result of arrayDifference of Array(UInt32). [#40211](https://github.com/ClickHouse/ClickHouse/pull/40211) ([Duc Canh Le](https://github.com/canhld94)). +* Fix the case when the order of columns can be incorrect if the `IN` operator is used with a table with `ENGINE = Set` containing multiple columns. This fixes [#13014](https://github.com/ClickHouse/ClickHouse/issues/13014). [#40225](https://github.com/ClickHouse/ClickHouse/pull/40225) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix possible segfault in CapnProto input format.
This bug was found and sent through the ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)). +* Avoid continuously growing memory consumption of pattern cache when using functions multi(Fuzzy)Match(Any|AllIndices|AnyIndex)(). [#40264](https://github.com/ClickHouse/ClickHouse/pull/40264) ([Robert Schulze](https://github.com/rschu1ze)). + +#### Build + +* Fix build error: ``` [ 69%] Building CXX object src/CMakeFiles/clickhouse_common_io.dir/Common/waitForPid.cpp.o /CLionProjects/clickhouse-yandex/src/Common/waitForPid.cpp:112:5: error: identifier '__kevp__' is reserved because it starts with '__' [-Werror,-Wreserved-identifier] EV_SET(&change, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL); ^ /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/sys/event.h:108:17: note: expanded from macro 'EV_SET' struct kevent *__kevp__ = (kevp); \ ^ ```. [#39493](https://github.com/ClickHouse/ClickHouse/pull/39493) ([小路](https://github.com/nicelulu)). + +#### Build Improvement + +* Fixed Endian issue in BitHelpers for s390x. [#39656](https://github.com/ClickHouse/ClickHouse/pull/39656) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Implement a piece of code related to SipHash for s390x architecture (which is not supported by ClickHouse). [#39732](https://github.com/ClickHouse/ClickHouse/pull/39732) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Fixed an Endian issue in Coordination snapshot code for s390x architecture (which is not supported by ClickHouse). [#39931](https://github.com/ClickHouse/ClickHouse/pull/39931) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Fixed Endian issues in Codec code for s390x architecture (which is not supported by ClickHouse). [#40008](https://github.com/ClickHouse/ClickHouse/pull/40008) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Fixed Endian issues in reading/writing BigEndian binary data in ReadHelpers and WriteHelpers code for s390x architecture (which is not supported by ClickHouse). [#40179](https://github.com/ClickHouse/ClickHouse/pull/40179) ([Harry Lee](https://github.com/HarryLeeIBM)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "tests: enable back 02232_dist_insert_send_logs_level_hung"'. [#39788](https://github.com/ClickHouse/ClickHouse/pull/39788) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "Update arrow to fix possible data race"'. [#39804](https://github.com/ClickHouse/ClickHouse/pull/39804) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "Revert "Update arrow to fix possible data race""'. [#39811](https://github.com/ClickHouse/ClickHouse/pull/39811) ([Kruglov Pavel](https://github.com/Avogar)). +* NO CL ENTRY: 'Revert "Limit number of analyze for one query"'. [#39816](https://github.com/ClickHouse/ClickHouse/pull/39816) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "Revert "tests: enable back 02232_dist_insert_send_logs_level_hung""'. [#39817](https://github.com/ClickHouse/ClickHouse/pull/39817) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Prepare library-bridge for catboost integration'. [#39904](https://github.com/ClickHouse/ClickHouse/pull/39904) ([Robert Schulze](https://github.com/rschu1ze)). +* NO CL ENTRY: 'Revert "ColumnVector: optimize filter with AVX512VBMI2 compress store"'.
[#39963](https://github.com/ClickHouse/ClickHouse/pull/39963) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "copy self-extracting to output"'. [#40005](https://github.com/ClickHouse/ClickHouse/pull/40005) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "Use separate counter for RSS in global memory tracker."'. [#40199](https://github.com/ClickHouse/ClickHouse/pull/40199) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "tests/performance: cover sparse_hashed dictionary"'. [#40268](https://github.com/ClickHouse/ClickHouse/pull/40268) ([Alexander Tokmakov](https://github.com/tavplubix)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Test/insert deduplication token materialized views [#34662](https://github.com/ClickHouse/ClickHouse/pull/34662) ([Denny Crane](https://github.com/den-crane)). +* Merging [#34372](https://github.com/ClickHouse/ClickHouse/issues/34372) [#35968](https://github.com/ClickHouse/ClickHouse/pull/35968) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Decoupling local cache function and cache algorithm [#38048](https://github.com/ClickHouse/ClickHouse/pull/38048) ([Han Shukai](https://github.com/KinderRiven)). +* Use separate counter for RSS in global memory tracker. [#38682](https://github.com/ClickHouse/ClickHouse/pull/38682) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Build self-extracting-executable utils [#38936](https://github.com/ClickHouse/ClickHouse/pull/38936) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Improvements in integration tests [#38978](https://github.com/ClickHouse/ClickHouse/pull/38978) ([Ilya Yatsishin](https://github.com/qoega)). +* More readable regexp in `test_quota` [#39084](https://github.com/ClickHouse/ClickHouse/pull/39084) ([Vladimir Chebotaryov](https://github.com/quickhouse)). +* Fixed regexp in `test_match_process_uid_against_data_owner` [#39085](https://github.com/ClickHouse/ClickHouse/pull/39085) ([Vladimir Chebotaryov](https://github.com/quickhouse)). +* tests: enable back 02232_dist_insert_send_logs_level_hung [#39124](https://github.com/ClickHouse/ClickHouse/pull/39124) ([Azat Khuzhin](https://github.com/azat)). +* Add connection info for Distributed sends log message [#39178](https://github.com/ClickHouse/ClickHouse/pull/39178) ([Azat Khuzhin](https://github.com/azat)). +* Forbid defining non-default disk with default path from [#39183](https://github.com/ClickHouse/ClickHouse/pull/39183) ([Azat Khuzhin](https://github.com/azat)). +* Fix LZ4 decompression issue for s390x [#39195](https://github.com/ClickHouse/ClickHouse/pull/39195) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Do not report "Failed communicating with" on and on for parts exchange [#39222](https://github.com/ClickHouse/ClickHouse/pull/39222) ([Azat Khuzhin](https://github.com/azat)). +* Improve logging around replicated merges [#39230](https://github.com/ClickHouse/ClickHouse/pull/39230) ([Raúl Marín](https://github.com/Algunenano)). +* Cleanup logic around join_algorithm setting, add docs [#39271](https://github.com/ClickHouse/ClickHouse/pull/39271) ([Vladimir C](https://github.com/vdimir)). +* Possible fix for flaky `test_keeper_force_recovery` [#39321](https://github.com/ClickHouse/ClickHouse/pull/39321) ([Antonio Andelic](https://github.com/antonio2368)). +* tests/performance: improve parallel_mv test [#39325](https://github.com/ClickHouse/ClickHouse/pull/39325) ([Azat Khuzhin](https://github.com/azat)). 
+* Update azure library (removed "harmful" function) [#39327](https://github.com/ClickHouse/ClickHouse/pull/39327) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Refactor PreparedSets/SubqueryForSet [#39343](https://github.com/ClickHouse/ClickHouse/pull/39343) ([Vladimir C](https://github.com/vdimir)). +* Small doc updates [#39362](https://github.com/ClickHouse/ClickHouse/pull/39362) ([Robert Schulze](https://github.com/rschu1ze)). +* Even less usage of StringRef [#39364](https://github.com/ClickHouse/ClickHouse/pull/39364) ([Robert Schulze](https://github.com/rschu1ze)). +* Automatic fixes for black formatting for domestic repo PRs [#39390](https://github.com/ClickHouse/ClickHouse/pull/39390) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Clickhouse-local fixes [#39404](https://github.com/ClickHouse/ClickHouse/pull/39404) ([Heena Bansal](https://github.com/HeenaBansal2009)). +* Uppercase `ROWS`, `GROUPS`, `RANGE` in queries with windows [#39410](https://github.com/ClickHouse/ClickHouse/pull/39410) ([Vladimir Chebotaryov](https://github.com/quickhouse)). +* GitHub helper [#39421](https://github.com/ClickHouse/ClickHouse/pull/39421) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* ShellCommand wait pid refactoring [#39426](https://github.com/ClickHouse/ClickHouse/pull/39426) ([Maksim Kita](https://github.com/kitaisreal)). +* Require clear style check to continue building [#39428](https://github.com/ClickHouse/ClickHouse/pull/39428) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* DirectDictionary improve performance of dictHas with duplicate keys [#39449](https://github.com/ClickHouse/ClickHouse/pull/39449) ([Maksim Kita](https://github.com/kitaisreal)). +* Commit status names: remove "actions" [#39454](https://github.com/ClickHouse/ClickHouse/pull/39454) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Improve synchronization between hosts in distributed backup and fix locks [#39455](https://github.com/ClickHouse/ClickHouse/pull/39455) ([Vitaly Baranov](https://github.com/vitlibar)). +* Remove some dead and commented code [#39460](https://github.com/ClickHouse/ClickHouse/pull/39460) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add Build Check and Special Build Check to SimpleCheck [#39467](https://github.com/ClickHouse/ClickHouse/pull/39467) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Update version after release [#39474](https://github.com/ClickHouse/ClickHouse/pull/39474) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Update version_date.tsv and changelogs after v22.7.1.2484-stable [#39475](https://github.com/ClickHouse/ClickHouse/pull/39475) ([github-actions[bot]](https://github.com/apps/github-actions)). +* Update README.md [#39478](https://github.com/ClickHouse/ClickHouse/pull/39478) ([Dan Roscigno](https://github.com/DanRoscigno)). +* Remove unused constructor [#39491](https://github.com/ClickHouse/ClickHouse/pull/39491) ([alesapin](https://github.com/alesapin)). +* Mark new codec DEFLATE_QPL as experimental + cosmetics [#39495](https://github.com/ClickHouse/ClickHouse/pull/39495) ([Robert Schulze](https://github.com/rschu1ze)). +* Update arrow to fix possible data race [#39510](https://github.com/ClickHouse/ClickHouse/pull/39510) ([Kruglov Pavel](https://github.com/Avogar)). +* fix `-DENABLE_EXAMPLES=1` in master [#39517](https://github.com/ClickHouse/ClickHouse/pull/39517) ([Constantine Peresypkin](https://github.com/pkit)). 
+* LZ4_decompress_faster.cpp: remove endianness-dependent code [#39523](https://github.com/ClickHouse/ClickHouse/pull/39523) ([Ignat Loskutov](https://github.com/loskutov)). +* Fix 02286_parallel_final [#39524](https://github.com/ClickHouse/ClickHouse/pull/39524) ([Nikita Taranov](https://github.com/nickitat)). +* add Equinix metal N3 Xlarge [#39532](https://github.com/ClickHouse/ClickHouse/pull/39532) ([Tyler Hannan](https://github.com/tylerhannan)). +* Less usage of StringRef [#39535](https://github.com/ClickHouse/ClickHouse/pull/39535) ([Robert Schulze](https://github.com/rschu1ze)). +* Follow up to [#37827](https://github.com/ClickHouse/ClickHouse/issues/37827) [#39557](https://github.com/ClickHouse/ClickHouse/pull/39557) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Temporarily disable all tests with MaterializedPostgreSQL [#39564](https://github.com/ClickHouse/ClickHouse/pull/39564) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Update version_date.tsv after v22.3.9.19-lts [#39576](https://github.com/ClickHouse/ClickHouse/pull/39576) ([github-actions[bot]](https://github.com/apps/github-actions)). +* free compression and decompression contexts [#39578](https://github.com/ClickHouse/ClickHouse/pull/39578) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Update version_date.tsv and changelogs after v22.6.4.35-stable [#39579](https://github.com/ClickHouse/ClickHouse/pull/39579) ([github-actions[bot]](https://github.com/apps/github-actions)). +* Merge Woboq code browser page into "Getting Started" document [#39596](https://github.com/ClickHouse/ClickHouse/pull/39596) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix Chain::addSink [#39601](https://github.com/ClickHouse/ClickHouse/pull/39601) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Update NuRaft to latest master [#39609](https://github.com/ClickHouse/ClickHouse/pull/39609) ([Antonio Andelic](https://github.com/antonio2368)). +* copy self-extracting to output [#39617](https://github.com/ClickHouse/ClickHouse/pull/39617) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Replace MemoryTrackerBlockerInThread to LockMemoryExceptionInThread [#39619](https://github.com/ClickHouse/ClickHouse/pull/39619) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Combining sumIf->countIf and multiIf->if opt. [#39621](https://github.com/ClickHouse/ClickHouse/pull/39621) ([Amos Bird](https://github.com/amosbird)). +* Update README.md [#39622](https://github.com/ClickHouse/ClickHouse/pull/39622) ([Ivan Blinkov](https://github.com/blinkov)). +* Disable 02327_capnproto_protobuf_empty_messages with Ordinary [#39623](https://github.com/ClickHouse/ClickHouse/pull/39623) ([Alexander Tokmakov](https://github.com/tavplubix)). +* add Dell PowerEdge R740XD results [#39625](https://github.com/ClickHouse/ClickHouse/pull/39625) ([Tyler Hannan](https://github.com/tylerhannan)). +* Attempt to fix wrong workflow_run data for rerun [#39630](https://github.com/ClickHouse/ClickHouse/pull/39630) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Run tests with Replicated database in master [#39653](https://github.com/ClickHouse/ClickHouse/pull/39653) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Rollback request in Keeper if storing log fails [#39673](https://github.com/ClickHouse/ClickHouse/pull/39673) ([Antonio Andelic](https://github.com/antonio2368)). 
+* Fix utils build on CI [#39679](https://github.com/ClickHouse/ClickHouse/pull/39679) ([Azat Khuzhin](https://github.com/azat)). +* Add duration_ms into system.zookeeper_log [#39686](https://github.com/ClickHouse/ClickHouse/pull/39686) ([Azat Khuzhin](https://github.com/azat)). +* Fix DISTINCT: handle all const columns case correctly [#39688](https://github.com/ClickHouse/ClickHouse/pull/39688) ([Igor Nikonov](https://github.com/devcrafter)). +* Update README.md [#39692](https://github.com/ClickHouse/ClickHouse/pull/39692) ([Yuko Takagi](https://github.com/yukotakagi)). +* Update Keeper version for digest [#39698](https://github.com/ClickHouse/ClickHouse/pull/39698) ([Antonio Andelic](https://github.com/antonio2368)). +* Change mysql-odbc url [#39702](https://github.com/ClickHouse/ClickHouse/pull/39702) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid recursive destruction of AST. [#39705](https://github.com/ClickHouse/ClickHouse/pull/39705) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Update ccache to the latest available version [#39709](https://github.com/ClickHouse/ClickHouse/pull/39709) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Join enums refactoring [#39718](https://github.com/ClickHouse/ClickHouse/pull/39718) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix flaky test `02360_send_logs_level_colors` [#39720](https://github.com/ClickHouse/ClickHouse/pull/39720) ([Anton Popov](https://github.com/CurtizJ)). +* Fix cherry-pick for cases, when assignee is not set for PR [#39723](https://github.com/ClickHouse/ClickHouse/pull/39723) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Jepsen label [#39730](https://github.com/ClickHouse/ClickHouse/pull/39730) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix redirecting of logs to stdout in clickhouse-client [#39731](https://github.com/ClickHouse/ClickHouse/pull/39731) ([Anton Popov](https://github.com/CurtizJ)). +* CI: refactor Simple Check, use statuses to make it stateful [#39735](https://github.com/ClickHouse/ClickHouse/pull/39735) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Use different root path for total-queue Jepsen test [#39738](https://github.com/ClickHouse/ClickHouse/pull/39738) ([Antonio Andelic](https://github.com/antonio2368)). +* Simple refactoring: ordinary DISTINCT implementation [#39740](https://github.com/ClickHouse/ClickHouse/pull/39740) ([Igor Nikonov](https://github.com/devcrafter)). +* Cleanup usages of `allow_experimental_projection_optimization` setting, part 1 [#39746](https://github.com/ClickHouse/ClickHouse/pull/39746) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Enable SQL function getOSKernelVersion() on all platforms [#39751](https://github.com/ClickHouse/ClickHouse/pull/39751) ([Robert Schulze](https://github.com/rschu1ze)). +* Try clang-15 for build with tsan [#39758](https://github.com/ClickHouse/ClickHouse/pull/39758) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Rename "splitted build" to "shared libraries build" in CI tools [#39759](https://github.com/ClickHouse/ClickHouse/pull/39759) ([Robert Schulze](https://github.com/rschu1ze)). +* Use std::popcount, ::countl_zero, ::countr_zero functions [#39760](https://github.com/ClickHouse/ClickHouse/pull/39760) ([Robert Schulze](https://github.com/rschu1ze)). +* Self-extracting - run resulting executable with execvp [#39763](https://github.com/ClickHouse/ClickHouse/pull/39763) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). 
+* Fix non-deterministic queries in distinct_in_order test [#39772](https://github.com/ClickHouse/ClickHouse/pull/39772) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix some flaky integration tests [#39775](https://github.com/ClickHouse/ClickHouse/pull/39775) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Retry inserts with ClickHouseHelper [#39780](https://github.com/ClickHouse/ClickHouse/pull/39780) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Add cloudflare DNS as a fallback [#39795](https://github.com/ClickHouse/ClickHouse/pull/39795) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Update README.md [#39796](https://github.com/ClickHouse/ClickHouse/pull/39796) ([Yuko Takagi](https://github.com/yukotakagi)). +* Minor fix for Stress Tests [#39798](https://github.com/ClickHouse/ClickHouse/pull/39798) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Typos [#39813](https://github.com/ClickHouse/ClickHouse/pull/39813) ([Robert Schulze](https://github.com/rschu1ze)). +* Update settings changes history [#39839](https://github.com/ClickHouse/ClickHouse/pull/39839) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix post-build script for building utils/self-extracting-executable/compressor [#39843](https://github.com/ClickHouse/ClickHouse/pull/39843) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Add hasJoin method into ASTSelectQuery [#39850](https://github.com/ClickHouse/ClickHouse/pull/39850) ([Maksim Kita](https://github.com/kitaisreal)). +* Update tweak on version part update [#39853](https://github.com/ClickHouse/ClickHouse/pull/39853) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Update version_date.tsv and changelogs after v22.7.2.15-stable [#39854](https://github.com/ClickHouse/ClickHouse/pull/39854) ([github-actions[bot]](https://github.com/apps/github-actions)). +* Fix typo and extra dots in exception messages from OverCommitTracker [#39858](https://github.com/ClickHouse/ClickHouse/pull/39858) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix flaky integration test test_async_backups_to_same_destination. [#39859](https://github.com/ClickHouse/ClickHouse/pull/39859) ([Vitaly Baranov](https://github.com/vitlibar)). +* Better total part size calculation on mutation [#39860](https://github.com/ClickHouse/ClickHouse/pull/39860) ([alesapin](https://github.com/alesapin)). +* typo: PostgerSQL -> PostgreSQL [#39861](https://github.com/ClickHouse/ClickHouse/pull/39861) ([nathanbegbie](https://github.com/nathanbegbie)). +* Remove prefer_localhost_replica from test [#39862](https://github.com/ClickHouse/ClickHouse/pull/39862) ([Igor Nikonov](https://github.com/devcrafter)). +* Block memory tracker in Keeper during commit [#39867](https://github.com/ClickHouse/ClickHouse/pull/39867) ([Antonio Andelic](https://github.com/antonio2368)). +* Update version_date.tsv after v22.3.10.22-lts [#39868](https://github.com/ClickHouse/ClickHouse/pull/39868) ([github-actions[bot]](https://github.com/apps/github-actions)). +* fix incorrect format for functions with settings [#39869](https://github.com/ClickHouse/ClickHouse/pull/39869) ([Constantine Peresypkin](https://github.com/pkit)). +* Get api url from event, not from const/ENV [#39871](https://github.com/ClickHouse/ClickHouse/pull/39871) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Cleanup unused dirs from `store/` on all disks [#39872](https://github.com/ClickHouse/ClickHouse/pull/39872) ([Alexander Tokmakov](https://github.com/tavplubix)). 
+* Update 02354_distributed_with_external_aggregation_memory_usage.sql [#39893](https://github.com/ClickHouse/ClickHouse/pull/39893) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix the race between waitMutation and updating local queue from ZK [#39900](https://github.com/ClickHouse/ClickHouse/pull/39900) ([Alexander Gololobov](https://github.com/davenger)). +* Improve 02354_distributed_with_external_aggregation_memory_usage [#39908](https://github.com/ClickHouse/ClickHouse/pull/39908) ([Nikita Taranov](https://github.com/nickitat)). +* Move username and password from URL parameters to Basic Authentication [#39910](https://github.com/ClickHouse/ClickHouse/pull/39910) ([San](https://github.com/santrancisco)). +* Remove cache flush from the Docs Check [#39911](https://github.com/ClickHouse/ClickHouse/pull/39911) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix flaky tests (`Tried to commit obsolete part`) [#39922](https://github.com/ClickHouse/ClickHouse/pull/39922) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add logging to debug flaky tests [#39925](https://github.com/ClickHouse/ClickHouse/pull/39925) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix flaky test `02360_send_logs_level_colors` [#39927](https://github.com/ClickHouse/ClickHouse/pull/39927) ([Anton Popov](https://github.com/CurtizJ)). +* Don't create self-extracting clickhouse for split build [#39936](https://github.com/ClickHouse/ClickHouse/pull/39936) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* tests/stress: add dmesg output (to see OOM details) [#39939](https://github.com/ClickHouse/ClickHouse/pull/39939) ([Azat Khuzhin](https://github.com/azat)). +* Create metadata directory on CREATE for FileLog engine [#39940](https://github.com/ClickHouse/ClickHouse/pull/39940) ([Azat Khuzhin](https://github.com/azat)). +* tests: fix 02352_rwlock flakiness [#39941](https://github.com/ClickHouse/ClickHouse/pull/39941) ([Azat Khuzhin](https://github.com/azat)). +* Remove old code from the website [#39947](https://github.com/ClickHouse/ClickHouse/pull/39947) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove debug trace from DistinctStep [#39955](https://github.com/ClickHouse/ClickHouse/pull/39955) ([Igor Nikonov](https://github.com/devcrafter)). +* IAST destructor intrusive list [#39956](https://github.com/ClickHouse/ClickHouse/pull/39956) ([Maksim Kita](https://github.com/kitaisreal)). +* Remove old code from the website (part 2) [#39959](https://github.com/ClickHouse/ClickHouse/pull/39959) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add Stateful tests (release), Stateless tests (release) to Mergeable Check [#39967](https://github.com/ClickHouse/ClickHouse/pull/39967) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Change font in CI reports [#39969](https://github.com/ClickHouse/ClickHouse/pull/39969) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add setting type to support special 'auto' value [#39974](https://github.com/ClickHouse/ClickHouse/pull/39974) ([Vladimir C](https://github.com/vdimir)). +* Update 02354_distributed_with_external_aggregation_memory_usage.sql [#39979](https://github.com/ClickHouse/ClickHouse/pull/39979) ([Nikita Taranov](https://github.com/nickitat)). +* tests/stress: fix dmesg reading [#39980](https://github.com/ClickHouse/ClickHouse/pull/39980) ([Azat Khuzhin](https://github.com/azat)). 
+* Disable 02380_insert_mv_race.sh with Ordinary [#39985](https://github.com/ClickHouse/ClickHouse/pull/39985) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Mention how the build can be speed up by disabling self-extraction [#39988](https://github.com/ClickHouse/ClickHouse/pull/39988) ([Robert Schulze](https://github.com/rschu1ze)). +* Use different root path for Jepsen Counter test [#39992](https://github.com/ClickHouse/ClickHouse/pull/39992) ([Antonio Andelic](https://github.com/antonio2368)). +* ActionsDAG rename index to outputs [#39998](https://github.com/ClickHouse/ClickHouse/pull/39998) ([Maksim Kita](https://github.com/kitaisreal)). +* Added H literal for Hour IntervalKind [#39999](https://github.com/ClickHouse/ClickHouse/pull/39999) ([Heena Bansal](https://github.com/HeenaBansal2009)). +* Try to avoid timeouts when checking for replication consistency [#40001](https://github.com/ClickHouse/ClickHouse/pull/40001) ([Alexander Tokmakov](https://github.com/tavplubix)). +* More generic check for MergeTree table family [#40004](https://github.com/ClickHouse/ClickHouse/pull/40004) ([Alexander Gololobov](https://github.com/davenger)). +* Further preparation for catboost integration into library-bridge [#40010](https://github.com/ClickHouse/ClickHouse/pull/40010) ([Robert Schulze](https://github.com/rschu1ze)). +* Self-extracting: decompressor, extract real path of executable instead of argv[0] [#40011](https://github.com/ClickHouse/ClickHouse/pull/40011) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* copy self-extracting to output [#40017](https://github.com/ClickHouse/ClickHouse/pull/40017) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Update 02354_distributed_with_external_aggregation_memory_usage.sql [#40024](https://github.com/ClickHouse/ClickHouse/pull/40024) ([Nikita Taranov](https://github.com/nickitat)). +* Fix segfault in `DataTypeAggregateFunction` [#40025](https://github.com/ClickHouse/ClickHouse/pull/40025) ([Anton Popov](https://github.com/CurtizJ)). +* tests/performance: cover sparse_hashed dictionary [#40027](https://github.com/ClickHouse/ClickHouse/pull/40027) ([Azat Khuzhin](https://github.com/azat)). +* Cleanup docs of parseDateTime*() function family [#40030](https://github.com/ClickHouse/ClickHouse/pull/40030) ([Robert Schulze](https://github.com/rschu1ze)). +* Job url [#40032](https://github.com/ClickHouse/ClickHouse/pull/40032) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Update version_date.tsv and changelogs after v22.6.5.22-stable [#40036](https://github.com/ClickHouse/ClickHouse/pull/40036) ([github-actions[bot]](https://github.com/apps/github-actions)). +* Non-significant changes [#40038](https://github.com/ClickHouse/ClickHouse/pull/40038) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* tests: attempt to make 02293_part_log_has_merge_reason less flaky [#40047](https://github.com/ClickHouse/ClickHouse/pull/40047) ([Azat Khuzhin](https://github.com/azat)). +* Remove documentation templates [#40048](https://github.com/ClickHouse/ClickHouse/pull/40048) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Move images to clickhouse-presentations repository. [#40049](https://github.com/ClickHouse/ClickHouse/pull/40049) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix broken image in test-visualizer [#40050](https://github.com/ClickHouse/ClickHouse/pull/40050) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
+* Add a test for query parameters in HTTP POST [#40055](https://github.com/ClickHouse/ClickHouse/pull/40055) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix clickhouse-test hang in case of CREATE DATABASE fails [#40057](https://github.com/ClickHouse/ClickHouse/pull/40057) ([Azat Khuzhin](https://github.com/azat)). +* tests: fix 02380_insert_mv_race for Ordinary database [#40058](https://github.com/ClickHouse/ClickHouse/pull/40058) ([Azat Khuzhin](https://github.com/azat)). +* Skip newlines before Tags in clickhouse-test [#40061](https://github.com/ClickHouse/ClickHouse/pull/40061) ([Vladimir C](https://github.com/vdimir)). +* Replace S3 URLs by parameter [#40066](https://github.com/ClickHouse/ClickHouse/pull/40066) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Finally fix `_csv.Error: field larger than field limit` [#40072](https://github.com/ClickHouse/ClickHouse/pull/40072) ([Alexander Tokmakov](https://github.com/tavplubix)). +* tests: fix 00926_adaptive_index_granularity_pk/00489_pk_subexpression flakiness [#40075](https://github.com/ClickHouse/ClickHouse/pull/40075) ([Azat Khuzhin](https://github.com/azat)). +* Changelogs and versions [#40090](https://github.com/ClickHouse/ClickHouse/pull/40090) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* A test for counting resources in subqueries [#40104](https://github.com/ClickHouse/ClickHouse/pull/40104) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Use a job ID as ref text [#40112](https://github.com/ClickHouse/ClickHouse/pull/40112) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Delete files DictionaryJoinAdapter.h/cpp [#40113](https://github.com/ClickHouse/ClickHouse/pull/40113) ([Vladimir C](https://github.com/vdimir)). +* Rework S3Helper a little bit [#40127](https://github.com/ClickHouse/ClickHouse/pull/40127) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* PODArray assign empty array fix [#40129](https://github.com/ClickHouse/ClickHouse/pull/40129) ([Maksim Kita](https://github.com/kitaisreal)). +* Disable 02390_prometheus_ClickHouseStatusInfo_DictionaryStatus with Ordinary database [#40136](https://github.com/ClickHouse/ClickHouse/pull/40136) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add tests with Ordinary database to flaky check [#40137](https://github.com/ClickHouse/ClickHouse/pull/40137) ([Alexander Tokmakov](https://github.com/tavplubix)). +* fs cache: minor change [#40138](https://github.com/ClickHouse/ClickHouse/pull/40138) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix typo [#40139](https://github.com/ClickHouse/ClickHouse/pull/40139) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix keeper-bench in case of error during scheduling a thread [#40147](https://github.com/ClickHouse/ClickHouse/pull/40147) ([Azat Khuzhin](https://github.com/azat)). +* Fix "Cannot quickly remove directory" [#40151](https://github.com/ClickHouse/ClickHouse/pull/40151) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Set sync_request_timeout to 10 to avoid reconnections in tests [#40158](https://github.com/ClickHouse/ClickHouse/pull/40158) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Disable zero-copy replication by default [#40175](https://github.com/ClickHouse/ClickHouse/pull/40175) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Improve assignment and logging for cherry-pick and backport steps [#40177](https://github.com/ClickHouse/ClickHouse/pull/40177) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+* test for Decimal aggregateFunction normalization [#39420](https://github.com/ClickHouse/ClickHouse/issues/39420) [#40178](https://github.com/ClickHouse/ClickHouse/pull/40178) ([Denny Crane](https://github.com/den-crane)). +* Minor build changes [#40182](https://github.com/ClickHouse/ClickHouse/pull/40182) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* clickhouse-test: enable ZooKeeper tests by default [#40191](https://github.com/ClickHouse/ClickHouse/pull/40191) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove old code [#40196](https://github.com/ClickHouse/ClickHouse/pull/40196) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update README.md [#40198](https://github.com/ClickHouse/ClickHouse/pull/40198) ([clickhouse-robot-curie](https://github.com/clickhouse-robot-curie)). +* Fix a bug with symlinks detection [#40232](https://github.com/ClickHouse/ClickHouse/pull/40232) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Better error message when restoring covered parts [#40234](https://github.com/ClickHouse/ClickHouse/pull/40234) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Try to print stacktraces if query timeouts in integration tests [#40248](https://github.com/ClickHouse/ClickHouse/pull/40248) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add Unit tests to Mergeable [#40250](https://github.com/ClickHouse/ClickHouse/pull/40250) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Extract common KV storage logic [#40261](https://github.com/ClickHouse/ClickHouse/pull/40261) ([Antonio Andelic](https://github.com/antonio2368)). +* Add update_mergeable_check trigger for Unit tests [#40269](https://github.com/ClickHouse/ClickHouse/pull/40269) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* CVE-2021-3520: (negligible) rdkafka library: update lz4.c from upstream [#40272](https://github.com/ClickHouse/ClickHouse/pull/40272) ([Suzy Wang](https://github.com/SuzyWangIBMer)). +* Fix build [#40297](https://github.com/ClickHouse/ClickHouse/pull/40297) ([Alexander Tokmakov](https://github.com/tavplubix)). + +#### Support CTE statement for ANTLR4 syntax file + +* ... [#39814](https://github.com/ClickHouse/ClickHouse/pull/39814) ([qianmoQ](https://github.com/qianmoQ)). + diff --git a/docs/changelogs/v22.8.2.11-lts.md b/docs/changelogs/v22.8.2.11-lts.md new file mode 100644 index 00000000000..df0127582ab --- /dev/null +++ b/docs/changelogs/v22.8.2.11-lts.md @@ -0,0 +1,22 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.8.2.11-lts (b4ed6d744ff) FIXME as compared to v22.8.1.2097-lts (09a2ff88435) + +#### Improvement +* Backported in [#40377](https://github.com/ClickHouse/ClickHouse/issues/40377): Improve and fix dictionaries in Arrow format. [#40173](https://github.com/ClickHouse/ClickHouse/pull/40173) ([Kruglov Pavel](https://github.com/Avogar)). + +#### NO CL CATEGORY + +* Backported in [#40472](https://github.com/ClickHouse/ClickHouse/issues/40472):. [#40462](https://github.com/ClickHouse/ClickHouse/pull/40462) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* fix heap buffer overflow by limiting http chunk size [#40292](https://github.com/ClickHouse/ClickHouse/pull/40292) ([Sema Checherinda](https://github.com/CheSema)). +* Fix typo in the S3 download links for [#40359](https://github.com/ClickHouse/ClickHouse/pull/40359) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+* Reduce changelog verbosity in CI [#40360](https://github.com/ClickHouse/ClickHouse/pull/40360) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + diff --git a/docs/en/development/adding_test_queries.md b/docs/en/development/adding_test_queries.md index 7ae50768eba..5c3dd7d85eb 100644 --- a/docs/en/development/adding_test_queries.md +++ b/docs/en/development/adding_test_queries.md @@ -1,10 +1,11 @@ --- +slug: /en/development/adding_test_queries sidebar_label: Adding Test Queries sidebar_position: 63 +title: How to add test queries to ClickHouse CI description: Instructions on how to add a test case to ClickHouse continuous integration --- -# How to add test queries to ClickHouse CI ClickHouse has hundreds (or even thousands) of features. Every commit gets checked by a complex set of tests containing many thousands of test cases. diff --git a/docs/en/development/architecture.md b/docs/en/development/architecture.md index b2f1f2448f8..c13b2519b84 100644 --- a/docs/en/development/architecture.md +++ b/docs/en/development/architecture.md @@ -1,4 +1,5 @@ --- +slug: /en/development/architecture sidebar_label: Architecture Overview sidebar_position: 62 --- diff --git a/docs/en/development/browse-code.md b/docs/en/development/browse-code.md index da924c359ff..0d064cc9b0c 100644 --- a/docs/en/development/browse-code.md +++ b/docs/en/development/browse-code.md @@ -1,4 +1,5 @@ --- +slug: /en/development/browse-code sidebar_label: Source Code Browser sidebar_position: 72 description: Various ways to browse and edit the source code diff --git a/docs/en/development/build-cross-arm.md b/docs/en/development/build-cross-arm.md index 346fa909567..c40ed1d7e86 100644 --- a/docs/en/development/build-cross-arm.md +++ b/docs/en/development/build-cross-arm.md @@ -1,10 +1,10 @@ --- +slug: /en/development/build-cross-arm sidebar_position: 67 +title: How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture sidebar_label: Build on Linux for AARCH64 (ARM64) --- -# How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture - If you use AArch64 machine and want to build ClickHouse for AArch64, build as usual. If you use x86_64 machine and want cross-compile for AArch64, add the following flag to `cmake`: `-DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake` diff --git a/docs/en/development/build-cross-osx.md b/docs/en/development/build-cross-osx.md index 0072c3253cf..7b151d087df 100644 --- a/docs/en/development/build-cross-osx.md +++ b/docs/en/development/build-cross-osx.md @@ -1,9 +1,10 @@ --- +slug: /en/development/build-cross-osx sidebar_position: 66 +title: How to Build ClickHouse on Linux for Mac OS X sidebar_label: Build on Linux for Mac OS X --- -# How to Build ClickHouse on Linux for Mac OS X This is for the case when you have a Linux machine and want to use it to build `clickhouse` binary that will run on OS X. This is intended for continuous integration checks that run on Linux servers. If you want to build ClickHouse directly on Mac OS X, then proceed with [another instruction](../development/build-osx.md). 
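All of the cross-compilation pages touched above share the same flow: point `cmake` at the matching toolchain file and build as usual. A minimal sketch for the AArch64 case follows; only the `-DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake` flag is taken from the AArch64 page above, while the directory layout, the use of Ninja, and the `clickhouse` target are assumptions about a typical out-of-source build.

```bash
# Hedged sketch: cross-compiling ClickHouse for AArch64 on an x86_64 Linux host.
# Only the toolchain-file flag is taken from the docs above; everything else is
# a typical CMake/Ninja build and may need adjusting to your environment.
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
cd ClickHouse
mkdir build-arm64 && cd build-arm64
cmake .. -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake
ninja clickhouse   # the resulting binary targets AArch64 and will not run on the build host
```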
diff --git a/docs/en/development/build-cross-riscv.md b/docs/en/development/build-cross-riscv.md index a0b31ff131a..a20913e7a32 100644 --- a/docs/en/development/build-cross-riscv.md +++ b/docs/en/development/build-cross-riscv.md @@ -1,10 +1,10 @@ --- +slug: /en/development/build-cross-riscv sidebar_position: 68 +title: How to Build ClickHouse on Linux for RISC-V 64 Architecture sidebar_label: Build on Linux for RISC-V 64 --- -# How to Build ClickHouse on Linux for RISC-V 64 Architecture - As of writing (11.11.2021) building for risc-v considered to be highly experimental. Not all features can be enabled. This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on another Linux machine with RISC-V 64 CPU architecture. This is intended for continuous integration checks that run on Linux servers. diff --git a/docs/en/development/build-osx.md b/docs/en/development/build-osx.md index ce12f92c0b9..97e4e4ddde1 100644 --- a/docs/en/development/build-osx.md +++ b/docs/en/development/build-osx.md @@ -1,11 +1,11 @@ --- +slug: /en/development/build-osx sidebar_position: 65 sidebar_label: Build on Mac OS X +title: How to Build ClickHouse on Mac OS X description: How to build ClickHouse on Mac OS X --- -# How to Build ClickHouse on Mac OS X - :::info You don't have to build ClickHouse yourself! You can install pre-built ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start). Follow **macOS (Intel)** or **macOS (Apple silicon)** installation instructions. ::: diff --git a/docs/en/development/build.md b/docs/en/development/build.md index cea6354094b..fa04fbf2680 100644 --- a/docs/en/development/build.md +++ b/docs/en/development/build.md @@ -1,10 +1,11 @@ --- +slug: /en/development/build sidebar_position: 64 sidebar_label: Build on Linux +title: How to Build ClickHouse on Linux description: How to build ClickHouse on Linux --- -# How to Build ClickHouse on Linux Supported platforms: diff --git a/docs/en/development/continuous-integration.md b/docs/en/development/continuous-integration.md index 48a12474a51..677fb81efdd 100644 --- a/docs/en/development/continuous-integration.md +++ b/docs/en/development/continuous-integration.md @@ -1,11 +1,11 @@ --- +slug: /en/development/continuous-integration sidebar_position: 62 sidebar_label: Continuous Integration Checks +title: Continuous Integration Checks description: When you submit a pull request, some automated checks are ran for your code by the ClickHouse continuous integration (CI) system --- -# Continuous Integration Checks - When you submit a pull request, some automated checks are ran for your code by the ClickHouse [continuous integration (CI) system](tests.md#test-automation). This happens after a repository maintainer (someone from ClickHouse team) has @@ -54,7 +54,7 @@ the documentation is wrong. Go to the check report and look for `ERROR` and `WAR Check that the description of your pull request conforms to the template [PULL_REQUEST_TEMPLATE.md](https://github.com/ClickHouse/ClickHouse/blob/master/.github/PULL_REQUEST_TEMPLATE.md). 
You have to specify a changelog category for your change (e.g., Bug Fix), and -write a user-readable message describing the change for [CHANGELOG.md](../whats-new/changelog/) +write a user-readable message describing the change for [CHANGELOG.md](../whats-new/changelog/index.md) ## Push To DockerHub diff --git a/docs/en/development/contrib.md b/docs/en/development/contrib.md index 13af1be5097..04158a0c3f7 100644 --- a/docs/en/development/contrib.md +++ b/docs/en/development/contrib.md @@ -1,4 +1,5 @@ --- +slug: /en/development/contrib sidebar_position: 71 sidebar_label: Third-Party Libraries description: A list of third-party libraries used diff --git a/docs/en/development/developer-instruction.md b/docs/en/development/developer-instruction.md index 945d5a2f62f..82cb4018625 100644 --- a/docs/en/development/developer-instruction.md +++ b/docs/en/development/developer-instruction.md @@ -1,4 +1,5 @@ --- +slug: /en/development/developer-instruction sidebar_position: 61 sidebar_label: Getting Started description: Prerequisites and an overview of how to build ClickHouse @@ -285,9 +286,4 @@ If you are not interested in functionality provided by third-party libraries, yo -DENABLE_LIBRARIES=0 -DENABLE_EMBEDDED_COMPILER=0 ``` -Compressing the binary at the end of the build may take a while, disable the self-extraction feature via -``` --DENABLE_CLICKHOUSE_SELF_EXTRACTING=0 -``` - In case of problems with any of the development options, you are on your own! diff --git a/docs/en/development/index.md b/docs/en/development/index.md deleted file mode 100644 index 7849c736229..00000000000 --- a/docs/en/development/index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -sidebar_label: Development -sidebar_position: 58 ---- - -# ClickHouse Development - -[Original article](https://clickhouse.com/docs/en/development/) diff --git a/docs/en/development/integrating_rust_libraries.md b/docs/en/development/integrating_rust_libraries.md index ccb703376cb..ef0472bf4ac 100644 --- a/docs/en/development/integrating_rust_libraries.md +++ b/docs/en/development/integrating_rust_libraries.md @@ -1,3 +1,6 @@ +--- +slug: /en/development/integrating_rust_libraries +--- # Integrating Rust libraries Rust library integration will be described based on BLAKE3 hash-function integration. diff --git a/docs/en/development/style.md b/docs/en/development/style.md index a543c7532f8..415312eece1 100644 --- a/docs/en/development/style.md +++ b/docs/en/development/style.md @@ -1,4 +1,5 @@ --- +slug: /en/development/style sidebar_position: 69 sidebar_label: C++ Guide description: A list of recommendations regarding coding style, naming convention, formatting and more diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index d4bf36b0026..e6d5cf66de9 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -1,11 +1,11 @@ --- +slug: /en/development/tests sidebar_position: 70 sidebar_label: Testing +title: ClickHouse Testing description: Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way. --- -# ClickHouse Testing - ## Functional Tests Functional tests are the most simple and convenient to use. Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way. 
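In practice a functional test is just a `.sql` (or `.sh`) file under `tests/queries/` with a matching `.reference` file holding the expected output. A hedged sketch of adding one, with an invented test number, name, and query, and assuming `clickhouse-test` is pointed at a locally running server:

```bash
# Hedged sketch: the test number, name and query are invented for illustration.
cat > tests/queries/0_stateless/99999_array_sort_example.sql <<'EOF'
SELECT arraySort([3, 1, 2]);
EOF
cat > tests/queries/0_stateless/99999_array_sort_example.reference <<'EOF'
[1,2,3]
EOF
# clickhouse-test runs the .sql file against a local server and diffs the
# actual output against the .reference file.
tests/clickhouse-test 99999_array_sort_example
```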
diff --git a/docs/en/engines/database-engines/atomic.md b/docs/en/engines/database-engines/atomic.md index 878307121aa..3ea5008c80a 100644 --- a/docs/en/engines/database-engines/atomic.md +++ b/docs/en/engines/database-engines/atomic.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/database-engines/atomic sidebar_label: Atomic sidebar_position: 10 --- diff --git a/docs/en/engines/database-engines/index.md b/docs/en/engines/database-engines/index.md index 237112a5bee..835383f503f 100644 --- a/docs/en/engines/database-engines/index.md +++ b/docs/en/engines/database-engines/index.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/database-engines/ toc_folder_title: Database Engines toc_priority: 27 toc_title: Introduction diff --git a/docs/en/engines/database-engines/lazy.md b/docs/en/engines/database-engines/lazy.md index 170e101d387..79299e338ab 100644 --- a/docs/en/engines/database-engines/lazy.md +++ b/docs/en/engines/database-engines/lazy.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/database-engines/lazy sidebar_label: Lazy sidebar_position: 20 --- diff --git a/docs/en/engines/database-engines/materialized-mysql.md b/docs/en/engines/database-engines/materialized-mysql.md index 4b16d877210..c8aa65bdd91 100644 --- a/docs/en/engines/database-engines/materialized-mysql.md +++ b/docs/en/engines/database-engines/materialized-mysql.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/database-engines/materialized-mysql sidebar_label: MaterializedMySQL sidebar_position: 70 --- diff --git a/docs/en/engines/database-engines/materialized-postgresql.md b/docs/en/engines/database-engines/materialized-postgresql.md index dc05c58f092..180e7578441 100644 --- a/docs/en/engines/database-engines/materialized-postgresql.md +++ b/docs/en/engines/database-engines/materialized-postgresql.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/database-engines/materialized-postgresql sidebar_label: MaterializedPostgreSQL sidebar_position: 60 --- diff --git a/docs/en/engines/database-engines/mysql.md b/docs/en/engines/database-engines/mysql.md index 89a0786a9ec..aae87d90fbd 100644 --- a/docs/en/engines/database-engines/mysql.md +++ b/docs/en/engines/database-engines/mysql.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/database-engines/mysql sidebar_position: 50 sidebar_label: MySQL --- diff --git a/docs/en/engines/database-engines/postgresql.md b/docs/en/engines/database-engines/postgresql.md index 5a430565d54..ce28635a12a 100644 --- a/docs/en/engines/database-engines/postgresql.md +++ b/docs/en/engines/database-engines/postgresql.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/database-engines/postgresql sidebar_position: 40 sidebar_label: PostgreSQL --- diff --git a/docs/en/engines/database-engines/replicated.md b/docs/en/engines/database-engines/replicated.md index 110b799c6be..554345a3c15 100644 --- a/docs/en/engines/database-engines/replicated.md +++ b/docs/en/engines/database-engines/replicated.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/database-engines/replicated sidebar_position: 30 sidebar_label: Replicated --- diff --git a/docs/en/engines/database-engines/sqlite.md b/docs/en/engines/database-engines/sqlite.md index 555f3e0b12b..eef0bb84088 100644 --- a/docs/en/engines/database-engines/sqlite.md +++ b/docs/en/engines/database-engines/sqlite.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/database-engines/sqlite sidebar_position: 55 sidebar_label: SQLite --- diff --git a/docs/en/engines/table-engines/index.md b/docs/en/engines/table-engines/index.md index b6a97ebfbc9..e1c7a8dedc5 100644 --- a/docs/en/engines/table-engines/index.md +++ 
b/docs/en/engines/table-engines/index.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/ toc_folder_title: Table Engines toc_priority: 26 toc_title: Introduction diff --git a/docs/en/engines/table-engines/integrations/ExternalDistributed.md b/docs/en/engines/table-engines/integrations/ExternalDistributed.md index a318e8c3a35..430f53423a4 100644 --- a/docs/en/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/en/engines/table-engines/integrations/ExternalDistributed.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/ExternalDistributed sidebar_position: 12 sidebar_label: ExternalDistributed --- diff --git a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md index 2c7484f34bd..0eb3331f471 100644 --- a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/embedded-rocksdb sidebar_position: 9 sidebar_label: EmbeddedRocksDB --- @@ -15,11 +16,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], ... -) ENGINE = EmbeddedRocksDB PRIMARY KEY(primary_key_name) +) ENGINE = EmbeddedRocksDB([ttl]) PRIMARY KEY(primary_key_name) ``` -Required parameters: +Engine parameters: +- `ttl` - time to live for values. TTL is accepted in seconds. If TTL is 0, a regular RocksDB instance is used (without TTL). - `primary_key_name` – any column name in the column list. - `primary key` must be specified, it supports only one column in the primary key. The primary key will be serialized in binary as a `rocksdb key`. - columns other than the primary key will be serialized in binary as `rocksdb` value in corresponding order. diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md index 9796fd73b1b..78eab7295dd 100644 --- a/docs/en/engines/table-engines/integrations/hdfs.md +++ b/docs/en/engines/table-engines/integrations/hdfs.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/hdfs sidebar_position: 6 sidebar_label: HDFS --- @@ -51,10 +52,14 @@ SELECT * FROM hdfs_engine_table LIMIT 2 ## Implementation Details {#implementation-details} - Reads and writes can be parallel. -- [Zero-copy](../../../operations/storing-data.md#zero-copy) replication is supported. - Not supported: - `ALTER` and `SELECT...SAMPLE` operations. - Indexes. + - [Zero-copy](../../../operations/storing-data.md#zero-copy) replication is possible, but not recommended. + + :::warning Zero-copy replication is not ready for production + Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use. 
+ ::: **Globs in path** diff --git a/docs/en/engines/table-engines/integrations/hive.md b/docs/en/engines/table-engines/integrations/hive.md index 52250b17d7a..d4b209a285c 100644 --- a/docs/en/engines/table-engines/integrations/hive.md +++ b/docs/en/engines/table-engines/integrations/hive.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/hive sidebar_position: 4 sidebar_label: Hive --- diff --git a/docs/en/engines/table-engines/integrations/index.md b/docs/en/engines/table-engines/integrations/index.md index 8c8728c7f17..7e67bcb6249 100644 --- a/docs/en/engines/table-engines/integrations/index.md +++ b/docs/en/engines/table-engines/integrations/index.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/ sidebar_position: 40 sidebar_label: Integrations --- diff --git a/docs/en/engines/table-engines/integrations/jdbc.md b/docs/en/engines/table-engines/integrations/jdbc.md index f9907d53672..2b2b30567aa 100644 --- a/docs/en/engines/table-engines/integrations/jdbc.md +++ b/docs/en/engines/table-engines/integrations/jdbc.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/jdbc sidebar_position: 3 sidebar_label: JDBC --- diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index 47a0e022841..88a0d08ebbd 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/kafka sidebar_position: 8 sidebar_label: Kafka --- diff --git a/docs/en/engines/table-engines/integrations/materialized-postgresql.md b/docs/en/engines/table-engines/integrations/materialized-postgresql.md index d3b70419290..2413b9c97b8 100644 --- a/docs/en/engines/table-engines/integrations/materialized-postgresql.md +++ b/docs/en/engines/table-engines/integrations/materialized-postgresql.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/materialized-postgresql sidebar_position: 12 sidebar_label: MaterializedPostgreSQL --- diff --git a/docs/en/engines/table-engines/integrations/mongodb.md b/docs/en/engines/table-engines/integrations/mongodb.md index 664e4722bbb..da626614425 100644 --- a/docs/en/engines/table-engines/integrations/mongodb.md +++ b/docs/en/engines/table-engines/integrations/mongodb.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/mongodb sidebar_position: 5 sidebar_label: MongoDB --- diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index 5cd43d8a6ba..7c9c4cfea53 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/mysql sidebar_position: 4 sidebar_label: MySQL --- diff --git a/docs/en/engines/table-engines/integrations/nats.md b/docs/en/engines/table-engines/integrations/nats.md index 7c975653f0e..90b30dc8295 100644 --- a/docs/en/engines/table-engines/integrations/nats.md +++ b/docs/en/engines/table-engines/integrations/nats.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/nats sidebar_position: 14 sidebar_label: NATS --- diff --git a/docs/en/engines/table-engines/integrations/odbc.md b/docs/en/engines/table-engines/integrations/odbc.md index e82edc92fe9..043d5170654 100644 --- a/docs/en/engines/table-engines/integrations/odbc.md +++ b/docs/en/engines/table-engines/integrations/odbc.md @@ -1,4 +1,5 @@ --- +slug: 
/en/engines/table-engines/integrations/odbc sidebar_position: 2 sidebar_label: ODBC --- diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index d029aef240f..4bb8033de9c 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/postgresql sidebar_position: 11 sidebar_label: PostgreSQL --- diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md index b2b672fb1ef..9227e5cdbfd 100644 --- a/docs/en/engines/table-engines/integrations/rabbitmq.md +++ b/docs/en/engines/table-engines/integrations/rabbitmq.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/rabbitmq sidebar_position: 10 sidebar_label: RabbitMQ --- diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index 09566a08dd6..986a29b8307 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/s3 sidebar_position: 7 sidebar_label: S3 --- @@ -50,10 +51,14 @@ For more information about virtual columns see [here](../../../engines/table-eng ## Implementation Details {#implementation-details} - Reads and writes can be parallel -- [Zero-copy](../../../operations/storing-data.md#zero-copy) replication is supported. - Not supported: - `ALTER` and `SELECT...SAMPLE` operations. - Indexes. + - [Zero-copy](../../../operations/storing-data.md#zero-copy) replication is possible, but not recommended. + + :::warning Zero-copy replication is not ready for production + Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use. 
+ ::: ## Wildcards In Path {#wildcards-in-path} diff --git a/docs/en/engines/table-engines/integrations/sqlite.md b/docs/en/engines/table-engines/integrations/sqlite.md index 2676f912350..241b32826f3 100644 --- a/docs/en/engines/table-engines/integrations/sqlite.md +++ b/docs/en/engines/table-engines/integrations/sqlite.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/integrations/sqlite sidebar_position: 7 sidebar_label: SQLite --- diff --git a/docs/en/engines/table-engines/log-family/index.md b/docs/en/engines/table-engines/log-family/index.md index 4ea2294554a..98bc4dbad04 100644 --- a/docs/en/engines/table-engines/log-family/index.md +++ b/docs/en/engines/table-engines/log-family/index.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/log-family/ sidebar_position: 20 sidebar_label: Log Family --- diff --git a/docs/en/engines/table-engines/log-family/log.md b/docs/en/engines/table-engines/log-family/log.md index d8cabfd25cd..2c1518f0127 100644 --- a/docs/en/engines/table-engines/log-family/log.md +++ b/docs/en/engines/table-engines/log-family/log.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/log-family/log toc_priority: 33 toc_title: Log --- diff --git a/docs/en/engines/table-engines/log-family/stripelog.md b/docs/en/engines/table-engines/log-family/stripelog.md index 759cbe532aa..b9dc0fe514b 100644 --- a/docs/en/engines/table-engines/log-family/stripelog.md +++ b/docs/en/engines/table-engines/log-family/stripelog.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/log-family/stripelog toc_priority: 32 toc_title: StripeLog --- diff --git a/docs/en/engines/table-engines/log-family/tinylog.md b/docs/en/engines/table-engines/log-family/tinylog.md index b23ec3e1d81..adc7553d869 100644 --- a/docs/en/engines/table-engines/log-family/tinylog.md +++ b/docs/en/engines/table-engines/log-family/tinylog.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/log-family/tinylog toc_priority: 34 toc_title: TinyLog --- diff --git a/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md index b2eea820139..ba518f51657 100644 --- a/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/mergetree-family/aggregatingmergetree sidebar_position: 60 sidebar_label: AggregatingMergeTree --- diff --git a/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md b/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md index 1b37e20d0da..5e38fa28195 100644 --- a/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/mergetree-family/collapsingmergetree sidebar_position: 70 sidebar_label: CollapsingMergeTree --- diff --git a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md index 1191becbb25..17135a88d5b 100644 --- a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/mergetree-family/custom-partitioning-key sidebar_position: 30 sidebar_label: Custom Partitioning Key --- diff --git 
a/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md b/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md index 9062dd3c423..b07f4a29396 100644 --- a/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/graphitemergetree.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/mergetree-family/graphitemergetree sidebar_position: 90 sidebar_label: GraphiteMergeTree --- diff --git a/docs/en/engines/table-engines/mergetree-family/index.md b/docs/en/engines/table-engines/mergetree-family/index.md index 45a671da76a..90892c1d6c9 100644 --- a/docs/en/engines/table-engines/mergetree-family/index.md +++ b/docs/en/engines/table-engines/mergetree-family/index.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/mergetree-family/ sidebar_position: 10 sidebar_label: MergeTree Family --- diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 42378d2ad07..0ebe3c99f35 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/mergetree-family/mergetree sidebar_position: 11 sidebar_label: MergeTree --- @@ -1023,6 +1024,10 @@ Other parameters: Examples of working configurations can be found in integration tests directory (see e.g. [test_merge_tree_azure_blob_storage](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_merge_tree_azure_blob_storage/configs/config.d/storage_conf.xml) or [test_azure_blob_storage_zero_copy_replication](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_azure_blob_storage_zero_copy_replication/configs/config.d/storage_conf.xml)). + :::warning Zero-copy replication is not ready for production + Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use. + ::: + ## Virtual Columns {#virtual-columns} - `_part` — Name of a part. diff --git a/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md b/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md index 2ed00b5b5a6..f5d81182898 100644 --- a/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/mergetree-family/replacingmergetree sidebar_position: 40 sidebar_label: ReplacingMergeTree --- @@ -44,6 +45,49 @@ When merging, `ReplacingMergeTree` from all the rows with the same sorting key l - The last in the selection, if `ver` not set. A selection is a set of rows in a set of parts participating in the merge. The most recently created part (the last insert) will be the last one in the selection. Thus, after deduplication, the very last row from the most recent insert will remain for each unique sorting key. - With the maximum version, if `ver` specified. If `ver` is the same for several rows, then it will use "if `ver` is not specified" rule for them, i.e. the most recent inserted row will remain. 
+Example: + +```sql +-- without ver - the last inserted 'wins' +CREATE TABLE myFirstReplacingMT +( + `key` Int64, + `someCol` String, + `eventTime` DateTime +) +ENGINE = ReplacingMergeTree +ORDER BY key; + +INSERT INTO myFirstReplacingMT Values (1, 'first', '2020-01-01 01:01:01'); +INSERT INTO myFirstReplacingMT Values (1, 'second', '2020-01-01 00:00:00'); + +SELECT * FROM myFirstReplacingMT FINAL; + +┌─key─┬─someCol─┬───────────eventTime─┐ +│ 1 │ second │ 2020-01-01 00:00:00 │ +└─────┴─────────┴─────────────────────┘ + + +-- with ver - the row with the biggest ver 'wins' +CREATE TABLE mySecondReplacingMT +( + `key` Int64, + `someCol` String, + `eventTime` DateTime +) +ENGINE = ReplacingMergeTree(eventTime) +ORDER BY key; + +INSERT INTO mySecondReplacingMT Values (1, 'first', '2020-01-01 01:01:01'); +INSERT INTO mySecondReplacingMT Values (1, 'second', '2020-01-01 00:00:00'); + +SELECT * FROM mySecondReplacingMT FINAL; + +┌─key─┬─someCol─┬───────────eventTime─┐ +│ 1 │ first │ 2020-01-01 01:01:01 │ +└─────┴─────────┴─────────────────────┘ +``` + ## Query clauses When creating a `ReplacingMergeTree` table the same [clauses](../../../engines/table-engines/mergetree-family/mergetree.md) are required, as when creating a `MergeTree` table. diff --git a/docs/en/engines/table-engines/mergetree-family/replication.md b/docs/en/engines/table-engines/mergetree-family/replication.md index 0dfcdccb029..06faceab8ec 100644 --- a/docs/en/engines/table-engines/mergetree-family/replication.md +++ b/docs/en/engines/table-engines/mergetree-family/replication.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/mergetree-family/replication sidebar_position: 20 sidebar_label: Data Replication --- diff --git a/docs/en/engines/table-engines/mergetree-family/summingmergetree.md b/docs/en/engines/table-engines/mergetree-family/summingmergetree.md index 7afa7cf028e..5a2c0718610 100644 --- a/docs/en/engines/table-engines/mergetree-family/summingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/summingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/mergetree-family/summingmergetree sidebar_position: 50 sidebar_label: SummingMergeTree --- diff --git a/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md index 5642602f4a1..ab149a7dc3d 100644 --- a/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/mergetree-family/versionedcollapsingmergetree sidebar_position: 80 sidebar_label: VersionedCollapsingMergeTree --- diff --git a/docs/en/engines/table-engines/special/buffer.md b/docs/en/engines/table-engines/special/buffer.md index bcd7c390eb1..ba2381d3c01 100644 --- a/docs/en/engines/table-engines/special/buffer.md +++ b/docs/en/engines/table-engines/special/buffer.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/buffer sidebar_position: 120 sidebar_label: Buffer --- diff --git a/docs/en/engines/table-engines/special/dictionary.md b/docs/en/engines/table-engines/special/dictionary.md index d73d3c65fb0..e19fbeef141 100644 --- a/docs/en/engines/table-engines/special/dictionary.md +++ b/docs/en/engines/table-engines/special/dictionary.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/dictionary sidebar_position: 20 sidebar_label: Dictionary --- diff --git 
a/docs/en/engines/table-engines/special/distributed.md b/docs/en/engines/table-engines/special/distributed.md index d643d4b3c68..b4b0b35d976 100644 --- a/docs/en/engines/table-engines/special/distributed.md +++ b/docs/en/engines/table-engines/special/distributed.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/distributed sidebar_position: 10 sidebar_label: Distributed --- diff --git a/docs/en/engines/table-engines/special/external-data.md b/docs/en/engines/table-engines/special/external-data.md index 2aa90be617f..beb025629fe 100644 --- a/docs/en/engines/table-engines/special/external-data.md +++ b/docs/en/engines/table-engines/special/external-data.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/external-data sidebar_position: 130 sidebar_label: External Data --- diff --git a/docs/en/engines/table-engines/special/file.md b/docs/en/engines/table-engines/special/file.md index 7a53670bebd..00f4b8ec0a9 100644 --- a/docs/en/engines/table-engines/special/file.md +++ b/docs/en/engines/table-engines/special/file.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/file sidebar_position: 40 sidebar_label: File --- diff --git a/docs/en/engines/table-engines/special/generate.md b/docs/en/engines/table-engines/special/generate.md index a217c240b1c..d03d6dc9d13 100644 --- a/docs/en/engines/table-engines/special/generate.md +++ b/docs/en/engines/table-engines/special/generate.md @@ -1,10 +1,10 @@ --- +slug: /en/engines/table-engines/special/generate sidebar_position: 140 sidebar_label: GenerateRandom +title: "GenerateRandom Table Engine" --- -# GenerateRandom Table Engine - The GenerateRandom table engine produces random data for given table schema. Usage examples: diff --git a/docs/en/engines/table-engines/special/index.md b/docs/en/engines/table-engines/special/index.md index be5ec79caf2..2247aeae5af 100644 --- a/docs/en/engines/table-engines/special/index.md +++ b/docs/en/engines/table-engines/special/index.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/ sidebar_position: 50 sidebar_label: Special --- diff --git a/docs/en/engines/table-engines/special/join.md b/docs/en/engines/table-engines/special/join.md index 4e628b8b9b0..161896e5550 100644 --- a/docs/en/engines/table-engines/special/join.md +++ b/docs/en/engines/table-engines/special/join.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/join sidebar_position: 70 sidebar_label: Join --- diff --git a/docs/en/engines/table-engines/special/materializedview.md b/docs/en/engines/table-engines/special/materializedview.md index 8c77a9ce087..7b06560ec98 100644 --- a/docs/en/engines/table-engines/special/materializedview.md +++ b/docs/en/engines/table-engines/special/materializedview.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/materializedview sidebar_position: 100 sidebar_label: MaterializedView --- diff --git a/docs/en/engines/table-engines/special/memory.md b/docs/en/engines/table-engines/special/memory.md index 1f822d2f96d..b56dab3e369 100644 --- a/docs/en/engines/table-engines/special/memory.md +++ b/docs/en/engines/table-engines/special/memory.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/memory sidebar_position: 110 sidebar_label: Memory --- diff --git a/docs/en/engines/table-engines/special/merge.md b/docs/en/engines/table-engines/special/merge.md index d32547a300c..57762c21d7b 100644 --- a/docs/en/engines/table-engines/special/merge.md +++ b/docs/en/engines/table-engines/special/merge.md @@ -1,4 +1,5 @@ --- +slug: 
/en/engines/table-engines/special/merge sidebar_position: 30 sidebar_label: Merge --- diff --git a/docs/en/engines/table-engines/special/null.md b/docs/en/engines/table-engines/special/null.md index ca02d8e300b..8d3cce07e33 100644 --- a/docs/en/engines/table-engines/special/null.md +++ b/docs/en/engines/table-engines/special/null.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/null sidebar_position: 50 sidebar_label: 'Null' --- diff --git a/docs/en/engines/table-engines/special/set.md b/docs/en/engines/table-engines/special/set.md index f7114f04cea..3a3e7c4d5de 100644 --- a/docs/en/engines/table-engines/special/set.md +++ b/docs/en/engines/table-engines/special/set.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/set sidebar_position: 60 sidebar_label: Set --- diff --git a/docs/en/engines/table-engines/special/url.md b/docs/en/engines/table-engines/special/url.md index 82617e9425d..e3cd7aa1dde 100644 --- a/docs/en/engines/table-engines/special/url.md +++ b/docs/en/engines/table-engines/special/url.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/url sidebar_position: 80 sidebar_label: URL --- @@ -13,6 +14,8 @@ Syntax: `URL(URL [,Format] [,CompressionMethod])` - The `Format` must be one that ClickHouse can use in `SELECT` queries and, if necessary, in `INSERTs`. For the full list of supported formats, see [Formats](../../../interfaces/formats.md#formats). + If this argument is not specified, ClickHouse detects the format automatically from the suffix of the `URL` parameter. If the suffix of the `URL` parameter does not match any supported format, the table fails to be created. For example, for the engine expression `URL('http://localhost/test.json')`, the `JSON` format is applied. + - `CompressionMethod` indicates whether the HTTP body should be compressed. If compression is enabled, the HTTP packets sent by the URL engine contain the 'Content-Encoding' header to indicate which compression method is used. To enable compression, please first make sure the remote HTTP endpoint indicated by the `URL` parameter supports the corresponding compression algorithm. @@ -27,6 +30,11 @@ The supported `CompressionMethod` should be one of the following: - bz2 - snappy - none +- auto + +If `CompressionMethod` is not specified, it defaults to `auto`. This means ClickHouse detects the compression method from the suffix of the `URL` parameter automatically. If the suffix matches one of the compression methods listed above, the corresponding compression is applied; otherwise, no compression is enabled. + +For example, for the engine expression `URL('http://localhost/test.gzip')`, the `gzip` compression method is applied, but for `URL('http://localhost/test.fr')`, no compression is enabled because the suffix `fr` does not match any of the compression methods above.
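Both detections can be seen in one place with a short sketch (the endpoint, table, and column names here are illustrative assumptions, not part of the reference above):

```sql
-- The format is inferred as JSON from the .json suffix; CompressionMethod
-- defaults to 'auto' and, since .json matches no compression method,
-- the body is read uncompressed.
CREATE TABLE url_suffix_demo (key String, value UInt32)
ENGINE = URL('http://localhost:12345/test.json');
```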
## Usage {#using-the-engine-in-the-clickhouse-server} diff --git a/docs/en/engines/table-engines/special/view.md b/docs/en/engines/table-engines/special/view.md index 5e646cf2fd3..3dd57651da9 100644 --- a/docs/en/engines/table-engines/special/view.md +++ b/docs/en/engines/table-engines/special/view.md @@ -1,4 +1,5 @@ --- +slug: /en/engines/table-engines/special/view sidebar_position: 90 sidebar_label: View --- diff --git a/docs/en/getting-started/example-datasets/amplab-benchmark.md b/docs/en/getting-started/example-datasets/amplab-benchmark.md index e50c71009bd..df8495bad9e 100644 --- a/docs/en/getting-started/example-datasets/amplab-benchmark.md +++ b/docs/en/getting-started/example-datasets/amplab-benchmark.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/amplab-benchmark sidebar_label: AMPLab Big Data Benchmark description: A benchmark dataset used for comparing the performance of data warehousing solutions. --- diff --git a/docs/en/getting-started/example-datasets/brown-benchmark.md b/docs/en/getting-started/example-datasets/brown-benchmark.md index cd4f5ae4a6b..7a0b2cd97ce 100644 --- a/docs/en/getting-started/example-datasets/brown-benchmark.md +++ b/docs/en/getting-started/example-datasets/brown-benchmark.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/brown-benchmark sidebar_label: Brown University Benchmark description: A new analytical benchmark for machine-generated log data --- diff --git a/docs/en/getting-started/example-datasets/cell-towers.md b/docs/en/getting-started/example-datasets/cell-towers.md index 8da7761eea4..2d90845127a 100644 --- a/docs/en/getting-started/example-datasets/cell-towers.md +++ b/docs/en/getting-started/example-datasets/cell-towers.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/cell-towers sidebar_label: Cell Towers --- diff --git a/docs/en/getting-started/example-datasets/criteo.md b/docs/en/getting-started/example-datasets/criteo.md index 2d1c700d15c..ab99333390e 100644 --- a/docs/en/getting-started/example-datasets/criteo.md +++ b/docs/en/getting-started/example-datasets/criteo.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/criteo sidebar_label: Terabyte Click Logs from Criteo --- diff --git a/docs/en/getting-started/example-datasets/github-events.md b/docs/en/getting-started/example-datasets/github-events.md index 3a0cbc3324d..62931a9e516 100644 --- a/docs/en/getting-started/example-datasets/github-events.md +++ b/docs/en/getting-started/example-datasets/github-events.md @@ -1,9 +1,9 @@ --- +slug: /en/getting-started/example-datasets/github-events sidebar_label: GitHub Events +title: "GitHub Events Dataset" --- -# GitHub Events Dataset - Dataset contains all events on GitHub from 2011 to Dec 6 2020, the size is 3.1 billion records. Download size is 75 GB and it will require up to 200 GB space on disk if stored in a table with lz4 compression. Full dataset description, insights, download instruction and interactive queries are posted [here](https://ghe.clickhouse.tech/). diff --git a/docs/en/getting-started/example-datasets/menus.md b/docs/en/getting-started/example-datasets/menus.md index fd20c75f707..72ac17e5128 100644 --- a/docs/en/getting-started/example-datasets/menus.md +++ b/docs/en/getting-started/example-datasets/menus.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/menus sidebar_label: New York Public Library "What's on the Menu?" 
Dataset --- diff --git a/docs/en/getting-started/example-datasets/metrica.md b/docs/en/getting-started/example-datasets/metrica.md index 300bbe58d3f..e966f6c20d6 100644 --- a/docs/en/getting-started/example-datasets/metrica.md +++ b/docs/en/getting-started/example-datasets/metrica.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/metrica sidebar_label: Web Analytics Data description: Dataset consisting of two tables containing anonymized web analytics data with hits and visits --- diff --git a/docs/en/getting-started/example-datasets/nyc-taxi.md b/docs/en/getting-started/example-datasets/nyc-taxi.md index 360f9eed1c8..11621cfa5f5 100644 --- a/docs/en/getting-started/example-datasets/nyc-taxi.md +++ b/docs/en/getting-started/example-datasets/nyc-taxi.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/nyc-taxi sidebar_label: New York Taxi Data sidebar_position: 2 description: Data for billions of taxi and for-hire vehicle (Uber, Lyft, etc.) trips originating in New York City since 2009 diff --git a/docs/en/getting-started/example-datasets/ontime.md b/docs/en/getting-started/example-datasets/ontime.md index a3af800f038..98469c045b4 100644 --- a/docs/en/getting-started/example-datasets/ontime.md +++ b/docs/en/getting-started/example-datasets/ontime.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/ontime sidebar_label: OnTime Airline Flight Data description: Dataset containing the on-time performance of airline flights --- diff --git a/docs/en/getting-started/example-datasets/opensky.md b/docs/en/getting-started/example-datasets/opensky.md index b38021c34eb..c37e13e381a 100644 --- a/docs/en/getting-started/example-datasets/opensky.md +++ b/docs/en/getting-started/example-datasets/opensky.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/opensky sidebar_label: Air Traffic Data description: The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic. --- diff --git a/docs/en/getting-started/example-datasets/recipes.md b/docs/en/getting-started/example-datasets/recipes.md index 37a6eeebea5..0945c97f9ea 100644 --- a/docs/en/getting-started/example-datasets/recipes.md +++ b/docs/en/getting-started/example-datasets/recipes.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/recipes sidebar_label: Recipes Dataset --- diff --git a/docs/en/getting-started/example-datasets/star-schema.md b/docs/en/getting-started/example-datasets/star-schema.md index 4756aedd08c..b3ff9b8c58f 100644 --- a/docs/en/getting-started/example-datasets/star-schema.md +++ b/docs/en/getting-started/example-datasets/star-schema.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/star-schema sidebar_label: Star Schema Benchmark description: "Dataset based on the TPC-H dbgen source. The coding style and architecture follows the TPCH dbgen." 
diff --git a/docs/en/getting-started/example-datasets/uk-price-paid.md b/docs/en/getting-started/example-datasets/uk-price-paid.md index 04e853275ed..27546120fef 100644 --- a/docs/en/getting-started/example-datasets/uk-price-paid.md +++ b/docs/en/getting-started/example-datasets/uk-price-paid.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/uk-price-paid sidebar_label: UK Property Price Paid sidebar_position: 1 --- diff --git a/docs/en/getting-started/example-datasets/wikistat.md b/docs/en/getting-started/example-datasets/wikistat.md index 1185338a1da..36feb21cd3b 100644 --- a/docs/en/getting-started/example-datasets/wikistat.md +++ b/docs/en/getting-started/example-datasets/wikistat.md @@ -1,4 +1,5 @@ --- +slug: /en/getting-started/example-datasets/wikistat sidebar_label: WikiStat --- diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index a5e6495d8d8..0bdb956f0cb 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -140,8 +140,6 @@ do || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz" done -exit 0 - tar -xzvf "clickhouse-common-static-$LATEST_VERSION-${ARCH}.tgz" \ || tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz" sudo "clickhouse-common-static-$LATEST_VERSION/install/doinst.sh" @@ -152,7 +150,7 @@ sudo "clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh" tar -xzvf "clickhouse-server-$LATEST_VERSION-${ARCH}.tgz" \ || tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz" -sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" +sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" configure sudo /etc/init.d/clickhouse-server start tar -xzvf "clickhouse-client-$LATEST_VERSION-${ARCH}.tgz" \ diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index 4fbeb5088ee..1f45d1fa411 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/cli sidebar_position: 17 sidebar_label: Command-Line Client --- diff --git a/docs/en/interfaces/cpp.md b/docs/en/interfaces/cpp.md index 364d77c21a4..850fdcceaa5 100644 --- a/docs/en/interfaces/cpp.md +++ b/docs/en/interfaces/cpp.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/cpp sidebar_position: 24 sidebar_label: C++ Client Library --- diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 10a311d3aec..640c49377d0 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/formats sidebar_position: 21 sidebar_label: Input and Output Formats --- diff --git a/docs/en/interfaces/grpc.md b/docs/en/interfaces/grpc.md index d5590d1cfb1..5ac2f5d5a60 100644 --- a/docs/en/interfaces/grpc.md +++ b/docs/en/interfaces/grpc.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/grpc sidebar_position: 19 sidebar_label: gRPC Interface --- diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index b1de6c2a105..036fcde6d7a 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/http sidebar_position: 19 sidebar_label: HTTP Interface --- diff --git a/docs/en/interfaces/jdbc.md b/docs/en/interfaces/jdbc.md index c508b540eaf..339c55e0848 100644 --- a/docs/en/interfaces/jdbc.md +++ b/docs/en/interfaces/jdbc.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/jdbc sidebar_position: 22 sidebar_label: JDBC Driver --- diff --git a/docs/en/interfaces/mysql.md b/docs/en/interfaces/mysql.md index fbaa49a66aa..9eb34a2bf17 100644 --- 
a/docs/en/interfaces/mysql.md +++ b/docs/en/interfaces/mysql.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/mysql sidebar_position: 20 sidebar_label: MySQL Interface --- diff --git a/docs/en/interfaces/odbc.md b/docs/en/interfaces/odbc.md index 48410fcdbad..3f311ea3f35 100644 --- a/docs/en/interfaces/odbc.md +++ b/docs/en/interfaces/odbc.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/odbc sidebar_position: 23 sidebar_label: ODBC Driver --- diff --git a/docs/en/interfaces/overview.md b/docs/en/interfaces/overview.md index 0c7378bf075..1982f793a6d 100644 --- a/docs/en/interfaces/overview.md +++ b/docs/en/interfaces/overview.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/overview sidebar_label: Overview sidebar_position: 1 keywords: [clickhouse, network, interfaces, http, tcp, grpc, command-line, client, jdbc, odbc, driver] diff --git a/docs/en/interfaces/postgresql.md b/docs/en/interfaces/postgresql.md index d62b1e7dfee..9ff83559787 100644 --- a/docs/en/interfaces/postgresql.md +++ b/docs/en/interfaces/postgresql.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/postgresql sidebar_position: 20 sidebar_label: PostgreSQL Interface --- diff --git a/docs/en/interfaces/tcp.md b/docs/en/interfaces/tcp.md index 16189f11a12..614dc587b52 100644 --- a/docs/en/interfaces/tcp.md +++ b/docs/en/interfaces/tcp.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/tcp sidebar_position: 18 sidebar_label: Native Interface (TCP) --- diff --git a/docs/en/interfaces/third-party/client-libraries.md b/docs/en/interfaces/third-party/client-libraries.md index 8067b18cc35..e085566aa7e 100644 --- a/docs/en/interfaces/third-party/client-libraries.md +++ b/docs/en/interfaces/third-party/client-libraries.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/third-party/client-libraries sidebar_position: 26 sidebar_label: Client Libraries --- @@ -31,6 +32,7 @@ ClickHouse Inc does **not** maintain the libraries listed below and hasn’t done - [chconn](https://github.com/vahid-sohrabloo/chconn) - [mailrugo-clickhouse](https://github.com/mailru/go-clickhouse) - [golang-clickhouse](https://github.com/leprosus/golang-clickhouse) + - [uptrace/go-clickhouse](https://clickhouse.uptrace.dev/) - Swift - [ClickHouseNIO](https://github.com/patrick-zippenfenig/ClickHouseNIO) - [ClickHouseVapor ORM](https://github.com/patrick-zippenfenig/ClickHouseVapor) diff --git a/docs/en/interfaces/third-party/gui.md b/docs/en/interfaces/third-party/gui.md index 7bcc8832da2..dd1c278d7e6 100644 --- a/docs/en/interfaces/third-party/gui.md +++ b/docs/en/interfaces/third-party/gui.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/third-party/gui sidebar_position: 28 sidebar_label: Visual Interfaces --- @@ -196,6 +197,18 @@ Features: The client is available for instant usage through GitHub Pages: https://metrico.github.io/clickhouse-mate/ +### Uptrace {#uptrace} + +[Uptrace](https://github.com/uptrace/uptrace) is an APM tool that provides distributed tracing and metrics powered by OpenTelemetry and ClickHouse. + +Features: + +- [OpenTelemetry tracing](https://uptrace.dev/opentelemetry/distributed-tracing.html), metrics, and logs. +- Email/Slack/PagerDuty notifications using AlertManager. +- SQL-like query language to aggregate spans. +- PromQL-like language to query metrics. +- Pre-built metrics dashboards. +- Multiple users/projects via YAML config.
## Commercial {#commercial} diff --git a/docs/en/interfaces/third-party/index.md b/docs/en/interfaces/third-party/index.md index f2d2f39f7f8..ad5ed0650a5 100644 --- a/docs/en/interfaces/third-party/index.md +++ b/docs/en/interfaces/third-party/index.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/third-party/ toc_folder_title: Third-Party sidebar_position: 24 --- diff --git a/docs/en/interfaces/third-party/integrations.md b/docs/en/interfaces/third-party/integrations.md index 6708cd103bc..de496546cb4 100644 --- a/docs/en/interfaces/third-party/integrations.md +++ b/docs/en/interfaces/third-party/integrations.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/third-party/integrations sidebar_position: 27 sidebar_label: Integrations --- diff --git a/docs/en/interfaces/third-party/proxy.md b/docs/en/interfaces/third-party/proxy.md index 2e395355c7d..0919d8dcb42 100644 --- a/docs/en/interfaces/third-party/proxy.md +++ b/docs/en/interfaces/third-party/proxy.md @@ -1,4 +1,5 @@ --- +slug: /en/interfaces/third-party/proxy sidebar_position: 29 sidebar_label: Proxies --- diff --git a/docs/en/operations/access-rights.md b/docs/en/operations/access-rights.md index a431f10fbad..1919aa49ab9 100644 --- a/docs/en/operations/access-rights.md +++ b/docs/en/operations/access-rights.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/access-rights sidebar_position: 48 sidebar_label: Access Control and Account Management title: Access Control and Account Management diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md index 76f63db9c2e..4cf205a88fa 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/backup.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/backup sidebar_position: 49 sidebar_label: Data Backup --- diff --git a/docs/en/operations/caches.md b/docs/en/operations/caches.md index f2eb1e3ce5c..57aa8691651 100644 --- a/docs/en/operations/caches.md +++ b/docs/en/operations/caches.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/caches sidebar_position: 65 sidebar_label: Caches --- diff --git a/docs/en/operations/clickhouse-keeper.md b/docs/en/operations/clickhouse-keeper.md index 0950568cc82..8bf64bca28f 100644 --- a/docs/en/operations/clickhouse-keeper.md +++ b/docs/en/operations/clickhouse-keeper.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/clickhouse-keeper sidebar_position: 66 sidebar_label: ClickHouse Keeper --- diff --git a/docs/en/operations/configuration-files.md b/docs/en/operations/configuration-files.md index b3e3ece74b0..439881a2f3e 100644 --- a/docs/en/operations/configuration-files.md +++ b/docs/en/operations/configuration-files.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/configuration-files sidebar_position: 50 sidebar_label: Configuration Files --- diff --git a/docs/en/operations/external-authenticators/index.md b/docs/en/operations/external-authenticators/index.md index d358267c4f0..7759701e5cb 100644 --- a/docs/en/operations/external-authenticators/index.md +++ b/docs/en/operations/external-authenticators/index.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/external-authenticators/ sidebar_position: 48 sidebar_label: External User Authenticators and Directories --- diff --git a/docs/en/operations/external-authenticators/kerberos.md b/docs/en/operations/external-authenticators/kerberos.md index 8b47ec2c809..689c3f66e04 100644 --- a/docs/en/operations/external-authenticators/kerberos.md +++ b/docs/en/operations/external-authenticators/kerberos.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/external-authenticators/kerberos +--- # Kerberos Existing and properly configured 
ClickHouse users can be authenticated via Kerberos authentication protocol. diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index 0c79e2438ff..cdc8cfaf5b0 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/external-authenticators/ldap +--- # LDAP LDAP server can be used to authenticate ClickHouse users. There are two different approaches for doing this: diff --git a/docs/en/operations/external-authenticators/ssl-x509.md b/docs/en/operations/external-authenticators/ssl-x509.md index c0d83005a7e..6482bb2bd12 100644 --- a/docs/en/operations/external-authenticators/ssl-x509.md +++ b/docs/en/operations/external-authenticators/ssl-x509.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/external-authenticators/ssl-x509 +--- # SSL X.509 certificate authentication [SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for the incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows you to uniquely authenticate an incoming connection. The `Common Name` field of the certificate is used to identify the connected user. This allows you to associate multiple certificates with the same user. Additionally, reissuing and revoking of the certificates does not affect the ClickHouse configuration. diff --git a/docs/en/operations/monitoring.md b/docs/en/operations/monitoring.md index 8a5358b146c..8c08080e331 100644 --- a/docs/en/operations/monitoring.md +++ b/docs/en/operations/monitoring.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/monitoring sidebar_position: 45 sidebar_label: Monitoring --- diff --git a/docs/en/operations/named-collections.md b/docs/en/operations/named-collections.md index 7623f7b7203..f605045a0ad 100644 --- a/docs/en/operations/named-collections.md +++ b/docs/en/operations/named-collections.md @@ -1,6 +1,7 @@ --- +slug: /en/operations/named-collections sidebar_position: 69 -sidebar_label: "Named connections" +sidebar_label: "Named collections" --- # Storing details for connecting to external sources in configuration files @@ -12,7 +13,7 @@ from users with only SQL access. Parameters can be set in XML `<format>CSV</format>` and overridden in SQL `, format = 'TSV'`. The parameters in SQL can be overridden using format `key` = `value`: `compression_method = 'gzip'`. -Named connections are stored in the `config.xml` file of the ClickHouse server in the `<named_collections>` section and are applied when ClickHouse starts. +Named collections are stored in the `config.xml` file of the ClickHouse server in the `<named_collections>` section and are applied when ClickHouse starts. Example of configuration: ```xml @@ -24,7 +25,7 @@ $ cat /etc/clickhouse-server/config.d/named_collections.xml ``` -## Named connections for accessing S3. +## Named collections for accessing S3. For the description of parameters, see [s3 Table Function](../sql-reference/table-functions/s3.md). @@ -42,7 +43,7 @@ Example of configuration: ``` -### Example of using named connections with the s3 function +### Example of using named collections with the s3 function ```sql INSERT INTO FUNCTION s3(s3_mydata, filename = 'test_file.tsv.gz', @@ -58,7 +59,7 @@ FROM s3(s3_mydata, filename = 'test_file.tsv.gz') 1 rows in set. Elapsed: 0.279 sec.
Processed 10.00 thousand rows, 90.00 KB (35.78 thousand rows/s., 322.02 KB/s.) ``` -### Example of using named connections with an S3 table +### Example of using named collections with an S3 table ```sql CREATE TABLE s3_engine_table (number Int64) @@ -73,7 +74,7 @@ SELECT * FROM s3_engine_table LIMIT 3; └────────┘ ``` -## Named connections for accessing MySQL database +## Named collections for accessing MySQL database For the description of parameters, see [mysql](../sql-reference/table-functions/mysql.md). @@ -95,7 +96,7 @@ Example of configuration: ``` -### Example of using named connections with the mysql function +### Example of using named collections with the mysql function ```sql SELECT count() FROM mysql(mymysql, table = 'test'); @@ -105,7 +106,7 @@ SELECT count() FROM mysql(mymysql, table = 'test'); └─────────┘ ``` -### Example of using named connections with an MySQL table +### Example of using named collections with a MySQL table ```sql CREATE TABLE mytable(A Int64) ENGINE = MySQL(mymysql, table = 'test', connection_pool_size=3, replace_query=0); @@ -116,7 +117,7 @@ SELECT count() FROM mytable; └─────────┘ ``` -### Example of using named connections with database with engine MySQL +### Example of using named collections with a database with engine MySQL ```sql CREATE DATABASE mydatabase ENGINE = MySQL(mymysql); @@ -129,7 +130,7 @@ SHOW TABLES FROM mydatabase; └────────┘ ``` -### Example of using named connections with an external dictionary with source MySQL +### Example of using named collections with an external dictionary with source MySQL ```sql CREATE DICTIONARY dict (A Int64, B String) @@ -145,7 +146,7 @@ SELECT dictGet('dict', 'B', 2); └─────────────────────────┘ ``` -## Named connections for accessing PostgreSQL database +## Named collections for accessing PostgreSQL database For the description of parameters, see [postgresql](../sql-reference/table-functions/postgresql.md). @@ -166,7 +167,7 @@ Example of configuration: ``` -### Example of using named connections with the postgresql function +### Example of using named collections with the postgresql function ```sql SELECT * FROM postgresql(mypg, table = 'test'); @@ -186,8 +187,7 @@ SELECT * FROM postgresql(mypg, table = 'test', schema = 'public'); └───┘ ``` - -### Example of using named connections with database with engine PostgreSQL +### Example of using named collections with a table with engine PostgreSQL ```sql CREATE TABLE mypgtable (a Int64) ENGINE = PostgreSQL(mypg, table = 'test', schema = 'public'); @@ -201,7 +201,7 @@ SELECT * FROM mypgtable; └───┘ ``` -### Example of using named connections with database with engine PostgreSQL +### Example of using named collections with a database with engine PostgreSQL ```sql CREATE DATABASE mydatabase ENGINE = PostgreSQL(mypg); @@ -213,7 +213,7 @@ SHOW TABLES FROM mydatabase └──────┘ ``` -### Example of using named connections with an external dictionary with source POSTGRESQL +### Example of using named collections with an external dictionary with source PostgreSQL ```sql CREATE DICTIONARY dict (a Int64, b String) @@ -228,3 +228,59 @@ SELECT dictGet('dict', 'b', 2); │ two │ └─────────────────────────┘ ``` + +## Named collections for accessing remote ClickHouse database + +For the description of parameters, see [remote](../sql-reference/table-functions/remote.md/#parameters).
+ +Example of configuration: + +```xml +<clickhouse> + <named_collections> + <remote1> + <host>localhost</host> + <port>9000</port> + <database>system</database> + <user>foo</user> + <password>secret</password> + </remote1> + </named_collections> +</clickhouse> +``` + +### Example of using named collections with the `remote`/`remoteSecure` functions + +```sql +SELECT * FROM remote(remote1, table = one); +┌─dummy─┐ +│ 0 │ +└───────┘ + +SELECT * FROM remote(remote1, database = merge(system, '^one')); +┌─dummy─┐ +│ 0 │ +└───────┘ + +INSERT INTO FUNCTION remote(remote1, database = default, table = test) VALUES (1,'a'); + +SELECT * FROM remote(remote1, database = default, table = test); +┌─a─┬─b─┐ +│ 1 │ a │ +└───┴───┘ +``` + +### Example of using named collections with an external dictionary with source ClickHouse + +```sql +CREATE DICTIONARY dict(a Int64, b String) +PRIMARY KEY a +SOURCE(CLICKHOUSE(NAME remote1 TABLE test DB default)) +LIFETIME(MIN 1 MAX 2) +LAYOUT(HASHED()); + +SELECT dictGet('dict', 'b', 1); +┌─dictGet('dict', 'b', 1)─┐ +│ a │ +└─────────────────────────┘ +``` diff --git a/docs/en/operations/opentelemetry.md b/docs/en/operations/opentelemetry.md index 740537d88bc..e51fad01ce2 100644 --- a/docs/en/operations/opentelemetry.md +++ b/docs/en/operations/opentelemetry.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/opentelemetry sidebar_position: 62 sidebar_label: OpenTelemetry Support --- diff --git a/docs/en/operations/optimizing-performance/index.md b/docs/en/operations/optimizing-performance/index.md index cde1ca9614b..e25f3b4adb7 100644 --- a/docs/en/operations/optimizing-performance/index.md +++ b/docs/en/operations/optimizing-performance/index.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/optimizing-performance/ sidebar_label: Optimizing Performance sidebar_position: 52 --- diff --git a/docs/en/operations/optimizing-performance/sampling-query-profiler.md b/docs/en/operations/optimizing-performance/sampling-query-profiler.md index 83bab2a3204..0178d5bcfa9 100644 --- a/docs/en/operations/optimizing-performance/sampling-query-profiler.md +++ b/docs/en/operations/optimizing-performance/sampling-query-profiler.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/optimizing-performance/sampling-query-profiler sidebar_position: 54 sidebar_label: Query Profiling --- diff --git a/docs/en/operations/performance-test.md b/docs/en/operations/performance-test.md index 09c5f5b023e..6e185c121de 100644 --- a/docs/en/operations/performance-test.md +++ b/docs/en/operations/performance-test.md @@ -1,10 +1,10 @@ --- +slug: /en/operations/performance-test sidebar_position: 54 sidebar_label: Testing Hardware +title: "How to Test Your Hardware with ClickHouse" --- -# How to Test Your Hardware with ClickHouse - You can run a basic ClickHouse performance test on any server without installation of ClickHouse packages.
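Once a server is up, a rough smoke test can be run entirely in SQL, shown here as a sketch rather than the full benchmark procedure this page goes on to describe (`numbers_mt` generates synthetic rows in parallel):

```sql
-- Scan and aggregate 100 million generated rows to gauge raw CPU throughput.
SELECT count(), sum(cityHash64(number))
FROM numbers_mt(100000000);
```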
diff --git a/docs/en/operations/quotas.md b/docs/en/operations/quotas.md index 2aa1c09386d..f35bf44fcd0 100644 --- a/docs/en/operations/quotas.md +++ b/docs/en/operations/quotas.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/quotas sidebar_position: 51 sidebar_label: Quotas --- diff --git a/docs/en/operations/requirements.md b/docs/en/operations/requirements.md index 753b4ee2b94..dc05a7b4896 100644 --- a/docs/en/operations/requirements.md +++ b/docs/en/operations/requirements.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/requirements sidebar_position: 44 sidebar_label: Requirements --- diff --git a/docs/en/operations/server-configuration-parameters/index.md b/docs/en/operations/server-configuration-parameters/index.md index 1e4ddc6368e..0a6b1953a62 100644 --- a/docs/en/operations/server-configuration-parameters/index.md +++ b/docs/en/operations/server-configuration-parameters/index.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/server-configuration-parameters/ sidebar_position: 54 sidebar_label: Server Configuration Parameters --- diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 936a20c5e9c..24e08fe1fcd 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/server-configuration-parameters/settings sidebar_position: 57 sidebar_label: Server Settings --- @@ -441,6 +442,8 @@ For more information, see the section “[Configuration files](../../operations/ ## interserver_listen_host {#interserver-listen-host} Restriction on hosts that can exchange data between ClickHouse servers. +If Keeper is used, the same restriction will be applied to the communication +between different Keeper instances. The default value equals the `listen_host` setting. Examples: @@ -743,13 +746,24 @@ On hosts with low RAM and swap, you possibly need setting `max_server_memory_usa - [max_server_memory_usage](#max_server_memory_usage) -## concurrent_threads_soft_limit {#concurrent_threads_soft_limit} -The maximum number of query processing threads, excluding threads for retrieving data from remote servers, allowed to run all queries. This is not a hard limit. In case if the limit is reached the query will still get one thread to run. +## concurrent_threads_soft_limit_num {#concurrent_threads_soft_limit_num} +The maximum number of query processing threads, excluding threads for retrieving data from remote servers, allowed to run all queries. This is not a hard limit. If the limit is reached, the query will still get at least one thread to run, and it can scale up to the desired number of threads during execution if more threads become available. Possible values: + +- Positive integer. +- 0 — No limit. + +Default value: `0`. + +## concurrent_threads_soft_limit_ratio_to_cores {#concurrent_threads_soft_limit_ratio_to_cores} +The maximum number of query processing threads as a multiple of the number of logical cores. +More details: [concurrent_threads_soft_limit_num](#concurrent-threads-soft-limit-num). + +Possible values: + - Positive integer. - 0 — No limit. -- -1 — The parameter is initialized by number of logical cores multiplies by 3. Which is a good heuristic for CPU-bound tasks. Default value: `0`.
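To observe the quantity these soft limits act on, one can watch the number of query processing threads currently running, as in this sketch (assuming the `QueryThread` entry of `system.metrics`):

```sql
-- Current number of query processing threads across all running queries.
SELECT value FROM system.metrics WHERE metric = 'QueryThread';
```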
diff --git a/docs/en/operations/settings/constraints-on-settings.md b/docs/en/operations/settings/constraints-on-settings.md index d240fde8ff3..4bef197b6cb 100644 --- a/docs/en/operations/settings/constraints-on-settings.md +++ b/docs/en/operations/settings/constraints-on-settings.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/settings/constraints-on-settings sidebar_position: 62 sidebar_label: Constraints on Settings --- diff --git a/docs/en/operations/settings/memory-overcommit.md b/docs/en/operations/settings/memory-overcommit.md index 74cbc4dbd03..43a7784e1ed 100644 --- a/docs/en/operations/settings/memory-overcommit.md +++ b/docs/en/operations/settings/memory-overcommit.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/settings/memory-overcommit +--- # Memory overcommit Memory overcommit is an experimental technique intended to allow setting more flexible memory limits for queries. diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md index e72314d84a5..0c33cdb7d43 100644 --- a/docs/en/operations/settings/merge-tree-settings.md +++ b/docs/en/operations/settings/merge-tree-settings.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/settings/merge-tree-settings +--- # MergeTree tables settings The values of `merge_tree` settings (for all MergeTree tables) can be viewed in the table `system.merge_tree_settings`; they can be overridden in `config.xml` in the `merge_tree` section, or set in the `SETTINGS` section of each table. @@ -135,7 +138,7 @@ Possible values: Default value: 100. -The `Insert` command creates one or more blocks (parts). For [insert deduplication](../../engines/table-engines/mergetree-family/replication/), when writing into replicated tables, ClickHouse writes the hash sums of the created parts into ClickHouse Keeper. Hash sums are stored only for the most recent `replicated_deduplication_window` blocks. The oldest hash sums are removed from ClickHouse Keeper. +The `Insert` command creates one or more blocks (parts). For [insert deduplication](../../engines/table-engines/mergetree-family/replication.md), when writing into replicated tables, ClickHouse writes the hash sums of the created parts into ClickHouse Keeper. Hash sums are stored only for the most recent `replicated_deduplication_window` blocks. The oldest hash sums are removed from ClickHouse Keeper. A large `replicated_deduplication_window` slows down `Inserts` because more entries need to be compared. The hash sum is calculated from the composition of the field names and types and the data of the inserted part (stream of bytes). @@ -164,6 +167,8 @@ Default value: 604800 (1 week). Similar to [replicated_deduplication_window](#replicated-deduplication-window), `replicated_deduplication_window_seconds` specifies how long to store hash sums of blocks for insert deduplication. Hash sums older than `replicated_deduplication_window_seconds` are removed from ClickHouse Keeper, even if they are less than `replicated_deduplication_window`. +The time is relative to the time of the most recent record, not to the wall time. If it is the only record, it will be stored forever. + ## max_replicated_logs_to_keep How many records may be in the ClickHouse Keeper log if there is an inactive replica. An inactive replica becomes lost when this number is exceeded.
@@ -218,6 +223,10 @@ Default value: 0 (seconds) When this setting has a value greater than zero, only a single replica starts the merge immediately if the merged part is on shared storage and `allow_remote_fs_zero_copy_replication` is enabled. +:::warning Zero-copy replication is not ready for production +Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use. +::: + Possible values: - Any positive integer. diff --git a/docs/en/operations/settings/permissions-for-queries.md b/docs/en/operations/settings/permissions-for-queries.md index 668cb9993eb..c183b159423 100644 --- a/docs/en/operations/settings/permissions-for-queries.md +++ b/docs/en/operations/settings/permissions-for-queries.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/settings/permissions-for-queries sidebar_position: 58 sidebar_label: Permissions for Queries --- diff --git a/docs/en/operations/settings/query-complexity.md b/docs/en/operations/settings/query-complexity.md index 342c8002572..597d524dd3f 100644 --- a/docs/en/operations/settings/query-complexity.md +++ b/docs/en/operations/settings/query-complexity.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/settings/query-complexity sidebar_position: 59 sidebar_label: Restrictions on Query Complexity --- diff --git a/docs/en/operations/settings/settings-profiles.md b/docs/en/operations/settings/settings-profiles.md index ea6c88a0f86..140c2bcb983 100644 --- a/docs/en/operations/settings/settings-profiles.md +++ b/docs/en/operations/settings/settings-profiles.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/settings/settings-profiles sidebar_position: 61 sidebar_label: Settings Profiles --- diff --git a/docs/en/operations/settings/settings-users.md b/docs/en/operations/settings/settings-users.md index 101ad46e55a..753eef1fb42 100644 --- a/docs/en/operations/settings/settings-users.md +++ b/docs/en/operations/settings/settings-users.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/settings/settings-users sidebar_position: 63 sidebar_label: User Settings --- diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index ddd8d2846f7..b4d44547328 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -966,7 +966,13 @@ Default value: 5000. See also: -- [Apache Kafka](https://kafka.apache.org/) +- [Apache Kafka](https://kafka.apache.org/) + +## kafka_disable_num_consumers_limit {#kafka-disable-num-consumers-limit} + +Disable the limit on `kafka_num_consumers` that depends on the number of available CPU cores. + +Default value: false. ## use_uncompressed_cache {#setting-use_uncompressed_cache} @@ -1253,7 +1259,9 @@ Possible values: Default value: 1. -By default, blocks inserted into replicated tables by the `INSERT` statement are deduplicated (see [Data Replication](../../engines/table-engines/mergetree-family/replication.md)). +By default, blocks inserted into replicated tables by the `INSERT` statement are deduplicated (see [Data Replication](../../engines/table-engines/mergetree-family/replication.md)). +For replicated tables, by default only the 100 most recent blocks for each partition are deduplicated (see [replicated_deduplication_window](merge-tree-settings.md#replicated-deduplication-window), [replicated_deduplication_window_seconds](merge-tree-settings.md/#replicated-deduplication-window-seconds)).
+For non-replicated tables, see [non_replicated_deduplication_window](merge-tree-settings.md/#non-replicated-deduplication-window). ## deduplicate_blocks_in_dependent_materialized_views {#settings-deduplicate-blocks-in-dependent-materialized-views} @@ -1287,6 +1295,9 @@ Default value: empty string (disabled) `insert_deduplication_token` is used for deduplication _only_ when not empty. +For replicated tables, by default only the 100 most recent inserts for each partition are deduplicated (see [replicated_deduplication_window](merge-tree-settings.md#replicated-deduplication-window), [replicated_deduplication_window_seconds](merge-tree-settings.md/#replicated-deduplication-window-seconds)). +For non-replicated tables, see [non_replicated_deduplication_window](merge-tree-settings.md/#non-replicated-deduplication-window). + Example: ```sql @@ -3237,7 +3248,7 @@ Possible values: - Positive integer. - 0 — Asynchronous insertions are disabled. -Default value: `1000000`. +Default value: `100000`. ## async_insert_busy_timeout_ms {#async-insert-busy-timeout-ms} diff --git a/docs/en/operations/ssl-zookeeper.md b/docs/en/operations/ssl-zookeeper.md index 79c65853d34..a38e9f81b41 100644 --- a/docs/en/operations/ssl-zookeeper.md +++ b/docs/en/operations/ssl-zookeeper.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/ssl-zookeeper sidebar_position: 45 sidebar_label: Secured Communication with Zookeeper --- diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 6afd5d4b726..fb30d36b1dc 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/storing-data sidebar_position: 68 sidebar_label: External Disks for Storing Data --- @@ -316,4 +317,8 @@ Use [http_max_single_read_retries](../operations/settings/settings.md#http-max-s ## Zero-copy Replication (not ready for production) {#zero-copy} -ClickHouse supports zero-copy replication for `S3` and `HDFS` disks, which means that if the data is stored remotely on several machines and needs to be synchronized, then only the metadata is replicated (paths to the data parts), but not the data itself. +Zero-copy replication is possible, but not recommended, with `S3` and `HDFS` disks. Zero-copy replication means that if the data is stored remotely on several machines and needs to be synchronized, then only the metadata is replicated (paths to the data parts), but not the data itself. + +:::warning Zero-copy replication is not ready for production +Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use. +::: \ No newline at end of file diff --git a/docs/en/operations/system-tables/asynchronous_metric_log.md b/docs/en/operations/system-tables/asynchronous_metric_log.md index f40b1e500c2..d047061baa3 100644 --- a/docs/en/operations/system-tables/asynchronous_metric_log.md +++ b/docs/en/operations/system-tables/asynchronous_metric_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/asynchronous_metric_log +--- # asynchronous_metric_log Contains the historical values for `system.asynchronous_metrics`, which are saved once per minute. Enabled by default.
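For example, a minimal sketch of pulling recent history for a single metric (the metric name is only an example):

```sql
SELECT event_time, metric, value
FROM system.asynchronous_metric_log
WHERE metric = 'MaxPartCountForPartition'
ORDER BY event_time DESC
LIMIT 5;
```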
diff --git a/docs/en/operations/system-tables/asynchronous_metrics.md b/docs/en/operations/system-tables/asynchronous_metrics.md index 17ee1b27e3d..c452e7f7766 100644 --- a/docs/en/operations/system-tables/asynchronous_metrics.md +++ b/docs/en/operations/system-tables/asynchronous_metrics.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/asynchronous_metrics +--- # asynchronous_metrics Contains metrics that are calculated periodically in the background. For example, the amount of RAM in use. diff --git a/docs/en/operations/system-tables/clusters.md b/docs/en/operations/system-tables/clusters.md index 9e086ef7808..2f958e977db 100644 --- a/docs/en/operations/system-tables/clusters.md +++ b/docs/en/operations/system-tables/clusters.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/clusters +--- # clusters Contains information about clusters available in the config file and the servers in them. diff --git a/docs/en/operations/system-tables/columns.md b/docs/en/operations/system-tables/columns.md index 1945963352b..a2b26c3684c 100644 --- a/docs/en/operations/system-tables/columns.md +++ b/docs/en/operations/system-tables/columns.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/columns +--- # columns Contains information about columns in all the tables. diff --git a/docs/en/operations/system-tables/contributors.md b/docs/en/operations/system-tables/contributors.md index 51a625bb844..8ad2577fc34 100644 --- a/docs/en/operations/system-tables/contributors.md +++ b/docs/en/operations/system-tables/contributors.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/contributors +--- # contributors Contains information about contributors. The order is random at query execution time. diff --git a/docs/en/operations/system-tables/crash-log.md b/docs/en/operations/system-tables/crash-log.md index 670ae7bc080..0c0a4cd967d 100644 --- a/docs/en/operations/system-tables/crash-log.md +++ b/docs/en/operations/system-tables/crash-log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/crash-log +--- # crash_log Contains information about stack traces for fatal errors. The table does not exist in the database by default, it is created only when fatal errors occur. diff --git a/docs/en/operations/system-tables/current-roles.md b/docs/en/operations/system-tables/current-roles.md index af4559ce6f7..35e789188ef 100644 --- a/docs/en/operations/system-tables/current-roles.md +++ b/docs/en/operations/system-tables/current-roles.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/current-roles +--- # current_roles Contains active roles of a current user. `SET ROLE` changes the contents of this table. diff --git a/docs/en/operations/system-tables/data_skipping_indices.md b/docs/en/operations/system-tables/data_skipping_indices.md index b3c7cbe2b23..338c6d02206 100644 --- a/docs/en/operations/system-tables/data_skipping_indices.md +++ b/docs/en/operations/system-tables/data_skipping_indices.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/data_skipping_indices +--- # data_skipping_indices Contains information about existing data skipping indices in all the tables. 
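A minimal sketch of listing the indices defined on one table (database and table names are placeholders):

```sql
SELECT name, type, expr, granularity
FROM system.data_skipping_indices
WHERE database = 'default' AND table = 'my_table';
```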
diff --git a/docs/en/operations/system-tables/data_type_families.md b/docs/en/operations/system-tables/data_type_families.md index 0202ba78ffe..0581c893337 100644 --- a/docs/en/operations/system-tables/data_type_families.md +++ b/docs/en/operations/system-tables/data_type_families.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/data_type_families +--- # data_type_families Contains information about supported [data types](../../sql-reference/data-types/index.md). diff --git a/docs/en/operations/system-tables/databases.md b/docs/en/operations/system-tables/databases.md index 6dbe02ca706..cd90c94c480 100644 --- a/docs/en/operations/system-tables/databases.md +++ b/docs/en/operations/system-tables/databases.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/databases +--- # databases Contains information about the databases that are available to the current user. diff --git a/docs/en/operations/system-tables/detached_parts.md b/docs/en/operations/system-tables/detached_parts.md index 9c0717fb062..78ae382f396 100644 --- a/docs/en/operations/system-tables/detached_parts.md +++ b/docs/en/operations/system-tables/detached_parts.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/detached_parts +--- # detached_parts Contains information about detached parts of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. The `reason` column specifies why the part was detached. diff --git a/docs/en/operations/system-tables/dictionaries.md b/docs/en/operations/system-tables/dictionaries.md index e0f2a7baa03..112e2cc2cdf 100644 --- a/docs/en/operations/system-tables/dictionaries.md +++ b/docs/en/operations/system-tables/dictionaries.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/dictionaries +--- # dictionaries Contains information about [external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). diff --git a/docs/en/operations/system-tables/disks.md b/docs/en/operations/system-tables/disks.md index 0fe557bf985..1106562da53 100644 --- a/docs/en/operations/system-tables/disks.md +++ b/docs/en/operations/system-tables/disks.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/disks +--- # disks Contains information about disks defined in the [server configuration](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). diff --git a/docs/en/operations/system-tables/distributed_ddl_queue.md b/docs/en/operations/system-tables/distributed_ddl_queue.md index a35d4a2a5b7..ef04df9751b 100644 --- a/docs/en/operations/system-tables/distributed_ddl_queue.md +++ b/docs/en/operations/system-tables/distributed_ddl_queue.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/distributed_ddl_queue +--- # distributed_ddl_queue Contains information about [distributed ddl queries (ON CLUSTER clause)](../../sql-reference/distributed-ddl.md) that were executed on a cluster. diff --git a/docs/en/operations/system-tables/distribution_queue.md b/docs/en/operations/system-tables/distribution_queue.md index 896491a458b..ac0798a933a 100644 --- a/docs/en/operations/system-tables/distribution_queue.md +++ b/docs/en/operations/system-tables/distribution_queue.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/distribution_queue +--- # distribution_queue Contains information about local files that are in the queue to be sent to the shards. These local files contain new parts that are created by inserting new data into the Distributed table in asynchronous mode. 
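As a sketch, pending files per shard can be spotted like this:

```sql
-- Tables with data still queued for sending, largest backlog first.
SELECT database, table, is_blocked, error_count, data_files, data_compressed_bytes
FROM system.distribution_queue
WHERE data_files > 0
ORDER BY data_files DESC;
```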
diff --git a/docs/en/operations/system-tables/enabled-roles.md b/docs/en/operations/system-tables/enabled-roles.md index a1649df875a..16d8329bc9b 100644 --- a/docs/en/operations/system-tables/enabled-roles.md +++ b/docs/en/operations/system-tables/enabled-roles.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/enabled-roles +--- # enabled_roles Contains all active roles at the moment, including current role of the current user and granted roles for current role. diff --git a/docs/en/operations/system-tables/errors.md b/docs/en/operations/system-tables/errors.md index 3e40e898a78..abd55d02aa2 100644 --- a/docs/en/operations/system-tables/errors.md +++ b/docs/en/operations/system-tables/errors.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/errors +--- # errors Contains error codes with the number of times they have been triggered. diff --git a/docs/en/operations/system-tables/events.md b/docs/en/operations/system-tables/events.md index 4525733a775..9ce92681507 100644 --- a/docs/en/operations/system-tables/events.md +++ b/docs/en/operations/system-tables/events.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/events +--- # events Contains information about the number of events that have occurred in the system. For example, in the table, you can find how many `SELECT` queries were processed since the ClickHouse server started. diff --git a/docs/en/operations/system-tables/functions.md b/docs/en/operations/system-tables/functions.md index 8dcad0b48a7..34553bed527 100644 --- a/docs/en/operations/system-tables/functions.md +++ b/docs/en/operations/system-tables/functions.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/functions +--- # functions Contains information about normal and aggregate functions. diff --git a/docs/en/operations/system-tables/grants.md b/docs/en/operations/system-tables/grants.md index d7cbc4ea556..536a9a46e86 100644 --- a/docs/en/operations/system-tables/grants.md +++ b/docs/en/operations/system-tables/grants.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/grants +--- # grants Privileges granted to ClickHouse user accounts. diff --git a/docs/en/operations/system-tables/graphite_retentions.md b/docs/en/operations/system-tables/graphite_retentions.md index 697e272e810..9edc958c6df 100644 --- a/docs/en/operations/system-tables/graphite_retentions.md +++ b/docs/en/operations/system-tables/graphite_retentions.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/graphite_retentions +--- # graphite_retentions Contains information about parameters [graphite_rollup](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-graphite) which are used in tables with [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md) engines. 
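For example, a sketch of inspecting the configured rollup rules:

```sql
SELECT config_name, regexp, function, age, precision, priority
FROM system.graphite_retentions
ORDER BY config_name, priority;
```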
diff --git a/docs/en/operations/system-tables/index.md b/docs/en/operations/system-tables/index.md index e33c7bde1e5..e08a727a62a 100644 --- a/docs/en/operations/system-tables/index.md +++ b/docs/en/operations/system-tables/index.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/system-tables/ sidebar_position: 52 sidebar_label: System Tables --- diff --git a/docs/en/operations/system-tables/information_schema.md b/docs/en/operations/system-tables/information_schema.md index 50c15dacd13..a573491282a 100644 --- a/docs/en/operations/system-tables/information_schema.md +++ b/docs/en/operations/system-tables/information_schema.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/information_schema +--- # INFORMATION_SCHEMA `INFORMATION_SCHEMA` (`information_schema`) is a system database that contains views. Using these views, you can get information about the metadata of database objects. These views read data from the columns of the [system.columns](../../operations/system-tables/columns.md), [system.databases](../../operations/system-tables/databases.md) and [system.tables](../../operations/system-tables/tables.md) system tables. diff --git a/docs/en/operations/system-tables/licenses.md b/docs/en/operations/system-tables/licenses.md index fad6e16fd8a..7802cb9121b 100644 --- a/docs/en/operations/system-tables/licenses.md +++ b/docs/en/operations/system-tables/licenses.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/licenses +--- # licenses Contains licenses of third-party libraries that are located in the [contrib](https://github.com/ClickHouse/ClickHouse/tree/master/contrib) directory of ClickHouse sources. diff --git a/docs/en/operations/system-tables/merge_tree_settings.md b/docs/en/operations/system-tables/merge_tree_settings.md index 49c5b951352..06fe3b42f3c 100644 --- a/docs/en/operations/system-tables/merge_tree_settings.md +++ b/docs/en/operations/system-tables/merge_tree_settings.md @@ -1,4 +1,7 @@ -# merge_tree_settings +--- +slug: /en/operations/system-tables/merge_tree_settings +title: merge_tree_settings +--- Contains information about settings for `MergeTree` tables. diff --git a/docs/en/operations/system-tables/merges.md b/docs/en/operations/system-tables/merges.md index 829be3e3147..5670d682b21 100644 --- a/docs/en/operations/system-tables/merges.md +++ b/docs/en/operations/system-tables/merges.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/merges +--- # merges Contains information about merges and part mutations currently in process for tables in the MergeTree family. diff --git a/docs/en/operations/system-tables/metric_log.md b/docs/en/operations/system-tables/metric_log.md index bb637d006d4..503b6a22b2b 100644 --- a/docs/en/operations/system-tables/metric_log.md +++ b/docs/en/operations/system-tables/metric_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/metric_log +--- # metric_log Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk. diff --git a/docs/en/operations/system-tables/metrics.md b/docs/en/operations/system-tables/metrics.md index 66a56cf3618..f07578cd931 100644 --- a/docs/en/operations/system-tables/metrics.md +++ b/docs/en/operations/system-tables/metrics.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/metrics +--- # metrics Contains metrics which can be calculated instantly, or have a current value. For example, the number of simultaneously processed queries or the current replica delay. This table is always up to date.
diff --git a/docs/en/operations/system-tables/mutations.md b/docs/en/operations/system-tables/mutations.md index 57fa3684c34..45447f3644e 100644 --- a/docs/en/operations/system-tables/mutations.md +++ b/docs/en/operations/system-tables/mutations.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/mutations +--- # mutations The table contains information about [mutations](../../sql-reference/statements/alter/index.md#mutations) of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row. diff --git a/docs/en/operations/system-tables/numbers.md b/docs/en/operations/system-tables/numbers.md index 4b18e9177e0..4e8a74050b6 100644 --- a/docs/en/operations/system-tables/numbers.md +++ b/docs/en/operations/system-tables/numbers.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/numbers +--- # numbers This table contains a single UInt64 column named `number` that contains almost all the natural numbers starting from zero. diff --git a/docs/en/operations/system-tables/numbers_mt.md b/docs/en/operations/system-tables/numbers_mt.md index d420186aec4..d5a89985007 100644 --- a/docs/en/operations/system-tables/numbers_mt.md +++ b/docs/en/operations/system-tables/numbers_mt.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/numbers_mt +--- # numbers_mt The same as [system.numbers](../../operations/system-tables/numbers.md) but reads are parallelized. The numbers can be returned in any order. diff --git a/docs/en/operations/system-tables/one.md b/docs/en/operations/system-tables/one.md index 6a4991a5190..1667de25e01 100644 --- a/docs/en/operations/system-tables/one.md +++ b/docs/en/operations/system-tables/one.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/one +--- # one This table contains a single row with a single `dummy` UInt8 column containing the value 0. diff --git a/docs/en/operations/system-tables/opentelemetry_span_log.md b/docs/en/operations/system-tables/opentelemetry_span_log.md index a9ca32ae030..71248447db2 100644 --- a/docs/en/operations/system-tables/opentelemetry_span_log.md +++ b/docs/en/operations/system-tables/opentelemetry_span_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/opentelemetry_span_log +--- # opentelemetry_span_log Contains information about [trace spans](https://opentracing.io/docs/overview/spans/) for executed queries. diff --git a/docs/en/operations/system-tables/part_log.md b/docs/en/operations/system-tables/part_log.md index e1134f6baf6..d5689b10fdd 100644 --- a/docs/en/operations/system-tables/part_log.md +++ b/docs/en/operations/system-tables/part_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/part_log +--- # part_log The `system.part_log` table is created only if the [part_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-part-log) server setting is specified. diff --git a/docs/en/operations/system-tables/parts.md b/docs/en/operations/system-tables/parts.md index 845c63e5626..f1d60896a2e 100644 --- a/docs/en/operations/system-tables/parts.md +++ b/docs/en/operations/system-tables/parts.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/parts +--- # parts {#system_tables-parts} Contains information about parts of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. 
diff --git a/docs/en/operations/system-tables/parts_columns.md b/docs/en/operations/system-tables/parts_columns.md index 2f85b912f38..68757ddfbff 100644 --- a/docs/en/operations/system-tables/parts_columns.md +++ b/docs/en/operations/system-tables/parts_columns.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/parts_columns +--- # parts_columns Contains information about parts and columns of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. diff --git a/docs/en/operations/system-tables/processes.md b/docs/en/operations/system-tables/processes.md index b808e801819..f505bb93430 100644 --- a/docs/en/operations/system-tables/processes.md +++ b/docs/en/operations/system-tables/processes.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/processes +--- # processes This system table is used for implementing the `SHOW PROCESSLIST` query. diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md index 80343a1cc2b..a04214f6488 100644 --- a/docs/en/operations/system-tables/query_log.md +++ b/docs/en/operations/system-tables/query_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/query_log +--- # query_log Contains information about executed queries, for example, start time, duration of processing, error messages. diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md index 2b4c4bab841..97a7d0a83ed 100644 --- a/docs/en/operations/system-tables/query_thread_log.md +++ b/docs/en/operations/system-tables/query_thread_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/query_thread_log +--- # query_thread_log Contains information about threads that execute queries, for example, thread name, thread start time, duration of query processing. diff --git a/docs/en/operations/system-tables/query_views_log.md b/docs/en/operations/system-tables/query_views_log.md index 007128f5619..1818dc261aa 100644 --- a/docs/en/operations/system-tables/query_views_log.md +++ b/docs/en/operations/system-tables/query_views_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/query_views_log +--- # query_views_log Contains information about the dependent views executed when running a query, for example, the view type or the execution time. diff --git a/docs/en/operations/system-tables/quota_limits.md b/docs/en/operations/system-tables/quota_limits.md index e3fea42c009..86c6231a3c8 100644 --- a/docs/en/operations/system-tables/quota_limits.md +++ b/docs/en/operations/system-tables/quota_limits.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/quota_limits +--- # quota_limits Contains information about maximums for all intervals of all quotas. Any number of rows or zero can correspond to one quota. diff --git a/docs/en/operations/system-tables/quota_usage.md b/docs/en/operations/system-tables/quota_usage.md index 059c073babb..ac050b630be 100644 --- a/docs/en/operations/system-tables/quota_usage.md +++ b/docs/en/operations/system-tables/quota_usage.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/quota_usage +--- # quota_usage Quota usage by the current user: how much is used and how much is left. 
diff --git a/docs/en/operations/system-tables/quotas.md b/docs/en/operations/system-tables/quotas.md index 2ef89f6749c..4ef88aa38d6 100644 --- a/docs/en/operations/system-tables/quotas.md +++ b/docs/en/operations/system-tables/quotas.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/quotas +--- # quotas Contains information about [quotas](../../operations/system-tables/quotas.md). diff --git a/docs/en/operations/system-tables/quotas_usage.md b/docs/en/operations/system-tables/quotas_usage.md index f295187f2ac..0cb4ebf38f0 100644 --- a/docs/en/operations/system-tables/quotas_usage.md +++ b/docs/en/operations/system-tables/quotas_usage.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/quotas_usage +--- # quotas_usage Quota usage by all users. diff --git a/docs/en/operations/system-tables/replicas.md b/docs/en/operations/system-tables/replicas.md index c32014b8f48..584736fe312 100644 --- a/docs/en/operations/system-tables/replicas.md +++ b/docs/en/operations/system-tables/replicas.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/replicas +--- # replicas Contains information and status for replicated tables residing on the local server. diff --git a/docs/en/operations/system-tables/replicated_fetches.md b/docs/en/operations/system-tables/replicated_fetches.md index fd6d7b54778..3536bbaff4d 100644 --- a/docs/en/operations/system-tables/replicated_fetches.md +++ b/docs/en/operations/system-tables/replicated_fetches.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/replicated_fetches +--- # replicated_fetches Contains information about currently running background fetches. diff --git a/docs/en/operations/system-tables/replication_queue.md b/docs/en/operations/system-tables/replication_queue.md index a7ac748ebbd..ced20b0048a 100644 --- a/docs/en/operations/system-tables/replication_queue.md +++ b/docs/en/operations/system-tables/replication_queue.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/replication_queue +--- # replication_queue Contains information about tasks from replication queues stored in ClickHouse Keeper, or ZooKeeper, for tables in the `ReplicatedMergeTree` family. diff --git a/docs/en/operations/system-tables/role-grants.md b/docs/en/operations/system-tables/role-grants.md index cb0c5bf0b0b..ce7ab4fd51e 100644 --- a/docs/en/operations/system-tables/role-grants.md +++ b/docs/en/operations/system-tables/role-grants.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/role-grants +--- # role_grants Contains the role grants for users and roles. To add entries to this table, use `GRANT role TO user`. diff --git a/docs/en/operations/system-tables/roles.md b/docs/en/operations/system-tables/roles.md index 6e1d112a6e5..95d9c97e61c 100644 --- a/docs/en/operations/system-tables/roles.md +++ b/docs/en/operations/system-tables/roles.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/roles +--- # roles Contains information about configured [roles](../../operations/access-rights.md#role-management). diff --git a/docs/en/operations/system-tables/row_policies.md b/docs/en/operations/system-tables/row_policies.md index d7869c62499..166c8a46f9a 100644 --- a/docs/en/operations/system-tables/row_policies.md +++ b/docs/en/operations/system-tables/row_policies.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/row_policies +--- # row_policies Contains filters for one particular table, as well as a list of roles and/or users which should use this row policy. 
diff --git a/docs/en/operations/system-tables/session_log.md b/docs/en/operations/system-tables/session_log.md index a42f0e79fe7..79c8ea184ce 100644 --- a/docs/en/operations/system-tables/session_log.md +++ b/docs/en/operations/system-tables/session_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/session_log +--- # session_log Contains information about all successful and failed login and logout events. diff --git a/docs/en/operations/system-tables/settings.md b/docs/en/operations/system-tables/settings.md index 35af1e286d8..2b05cb9bc32 100644 --- a/docs/en/operations/system-tables/settings.md +++ b/docs/en/operations/system-tables/settings.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/settings +--- # settings Contains information about session settings for current user. diff --git a/docs/en/operations/system-tables/settings_profile_elements.md b/docs/en/operations/system-tables/settings_profile_elements.md index 9afde010d0f..d9fce8d035f 100644 --- a/docs/en/operations/system-tables/settings_profile_elements.md +++ b/docs/en/operations/system-tables/settings_profile_elements.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/settings_profile_elements +--- # settings_profile_elements Describes the content of the settings profile: diff --git a/docs/en/operations/system-tables/settings_profiles.md b/docs/en/operations/system-tables/settings_profiles.md index f14f8077143..ece13c80bea 100644 --- a/docs/en/operations/system-tables/settings_profiles.md +++ b/docs/en/operations/system-tables/settings_profiles.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/settings_profiles +--- # settings_profiles Contains properties of configured setting profiles. diff --git a/docs/en/operations/system-tables/stack_trace.md b/docs/en/operations/system-tables/stack_trace.md index 3cc3b4043e3..c64cf067220 100644 --- a/docs/en/operations/system-tables/stack_trace.md +++ b/docs/en/operations/system-tables/stack_trace.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/stack_trace +--- # stack_trace Contains stack traces of all server threads. Allows developers to introspect the server state. diff --git a/docs/en/operations/system-tables/storage_policies.md b/docs/en/operations/system-tables/storage_policies.md index 85e745dd0f8..103d95f65e4 100644 --- a/docs/en/operations/system-tables/storage_policies.md +++ b/docs/en/operations/system-tables/storage_policies.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/storage_policies +--- # storage_policies Contains information about storage policies and volumes defined in the [server configuration](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). diff --git a/docs/en/operations/system-tables/table_engines.md b/docs/en/operations/system-tables/table_engines.md index f5da0777d7c..7a326d0015d 100644 --- a/docs/en/operations/system-tables/table_engines.md +++ b/docs/en/operations/system-tables/table_engines.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/table_engines +--- # table_engines Contains description of table engines supported by server and their feature support information. 
diff --git a/docs/en/operations/system-tables/tables.md b/docs/en/operations/system-tables/tables.md index 804b5862b34..497e23dd7ca 100644 --- a/docs/en/operations/system-tables/tables.md +++ b/docs/en/operations/system-tables/tables.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/tables +--- # tables Contains metadata of each table that the server knows about. diff --git a/docs/en/operations/system-tables/text_log.md b/docs/en/operations/system-tables/text_log.md index f2d2042f5da..4c45f6d81da 100644 --- a/docs/en/operations/system-tables/text_log.md +++ b/docs/en/operations/system-tables/text_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/text_log +--- # text_log Contains logging entries. The logging level which goes to this table can be limited to the `text_log.level` server setting. diff --git a/docs/en/operations/system-tables/time_zones.md b/docs/en/operations/system-tables/time_zones.md index 78ce02ba3ae..7d5361d26fc 100644 --- a/docs/en/operations/system-tables/time_zones.md +++ b/docs/en/operations/system-tables/time_zones.md @@ -1,4 +1,7 @@ -# time_zones +--- +slug: /en/operations/system-tables/time_zones +title: time_zones +--- Contains a list of time zones that are supported by the ClickHouse server. This list of timezones might vary depending on the version of ClickHouse. diff --git a/docs/en/operations/system-tables/trace_log.md b/docs/en/operations/system-tables/trace_log.md index 8d9936b5097..0effe085b80 100644 --- a/docs/en/operations/system-tables/trace_log.md +++ b/docs/en/operations/system-tables/trace_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/trace_log +--- # trace_log Contains stack traces collected by the sampling query profiler. diff --git a/docs/en/operations/system-tables/users.md b/docs/en/operations/system-tables/users.md index 4543b35c0ff..eaeabab131b 100644 --- a/docs/en/operations/system-tables/users.md +++ b/docs/en/operations/system-tables/users.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/users +--- # users Contains a list of [user accounts](../../operations/access-rights.md#user-account-management) configured at the server. diff --git a/docs/en/operations/system-tables/zookeeper.md b/docs/en/operations/system-tables/zookeeper.md index 923676b31e6..1522d6044e9 100644 --- a/docs/en/operations/system-tables/zookeeper.md +++ b/docs/en/operations/system-tables/zookeeper.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/zookeeper +--- # zookeeper The table does not exist unless ClickHouse Keeper or ZooKeeper is configured. The `system.zookeeper` table exposes data from the Keeper cluster defined in the config. diff --git a/docs/en/operations/system-tables/zookeeper_log.md b/docs/en/operations/system-tables/zookeeper_log.md index 2a5d7dbe8d2..58c44325737 100644 --- a/docs/en/operations/system-tables/zookeeper_log.md +++ b/docs/en/operations/system-tables/zookeeper_log.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/system-tables/zookeeper_log +--- # zookeeper_log This table contains information about the parameters of the request to the ZooKeeper server and the response from it. diff --git a/docs/en/operations/tips.md b/docs/en/operations/tips.md index 5325311a9e6..85927cd0e05 100644 --- a/docs/en/operations/tips.md +++ b/docs/en/operations/tips.md @@ -1,10 +1,10 @@ --- +slug: /en/operations/tips sidebar_position: 58 sidebar_label: Usage Recommendations +title: "Usage Recommendations" --- -# Usage Recommendations - ## CPU Scaling Governor Always use the `performance` scaling governor. 
The `on-demand` scaling governor works much worse with constantly high demand. diff --git a/docs/en/operations/troubleshooting.md b/docs/en/operations/troubleshooting.md index 510024eb468..5a61359a2c0 100644 --- a/docs/en/operations/troubleshooting.md +++ b/docs/en/operations/troubleshooting.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/troubleshooting sidebar_position: 46 sidebar_label: Troubleshooting --- diff --git a/docs/en/operations/update.md b/docs/en/operations/update.md index 95e9bed675b..24f7efecc7b 100644 --- a/docs/en/operations/update.md +++ b/docs/en/operations/update.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/update sidebar_position: 47 sidebar_label: ClickHouse Upgrade --- diff --git a/docs/en/operations/utilities/clickhouse-benchmark.md b/docs/en/operations/utilities/clickhouse-benchmark.md index 3a52ec92dc3..1a250ea5481 100644 --- a/docs/en/operations/utilities/clickhouse-benchmark.md +++ b/docs/en/operations/utilities/clickhouse-benchmark.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/utilities/clickhouse-benchmark sidebar_position: 61 sidebar_label: clickhouse-benchmark --- diff --git a/docs/en/operations/utilities/clickhouse-compressor.md b/docs/en/operations/utilities/clickhouse-compressor.md index 2f8f4794ba8..d432e8114c0 100644 --- a/docs/en/operations/utilities/clickhouse-compressor.md +++ b/docs/en/operations/utilities/clickhouse-compressor.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/utilities/clickhouse-compressor +--- # clickhouse-compressor diff --git a/docs/en/operations/utilities/clickhouse-copier.md b/docs/en/operations/utilities/clickhouse-copier.md index f3806d1afbc..c6236a7f5fb 100644 --- a/docs/en/operations/utilities/clickhouse-copier.md +++ b/docs/en/operations/utilities/clickhouse-copier.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/utilities/clickhouse-copier sidebar_position: 59 sidebar_label: clickhouse-copier --- diff --git a/docs/en/operations/utilities/clickhouse-format.md b/docs/en/operations/utilities/clickhouse-format.md index 219a170fc23..4a74901c980 100644 --- a/docs/en/operations/utilities/clickhouse-format.md +++ b/docs/en/operations/utilities/clickhouse-format.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/utilities/clickhouse-format +--- # clickhouse-format Allows formatting input queries. diff --git a/docs/en/operations/utilities/clickhouse-local.md b/docs/en/operations/utilities/clickhouse-local.md index a22b8ae0750..cb1b8b9a8e6 100644 --- a/docs/en/operations/utilities/clickhouse-local.md +++ b/docs/en/operations/utilities/clickhouse-local.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/utilities/clickhouse-local sidebar_position: 60 sidebar_label: clickhouse-local --- diff --git a/docs/en/operations/utilities/clickhouse-obfuscator.md b/docs/en/operations/utilities/clickhouse-obfuscator.md index 02d6fd225ec..8787f0b1bb3 100644 --- a/docs/en/operations/utilities/clickhouse-obfuscator.md +++ b/docs/en/operations/utilities/clickhouse-obfuscator.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/utilities/clickhouse-obfuscator +--- # clickhouse-obfuscator A simple tool for table data obfuscation. 
diff --git a/docs/en/operations/utilities/index.md b/docs/en/operations/utilities/index.md index 7fdc783f9c4..df4af30768c 100644 --- a/docs/en/operations/utilities/index.md +++ b/docs/en/operations/utilities/index.md @@ -1,4 +1,5 @@ --- +slug: /en/operations/utilities/ sidebar_position: 56 sidebar_label: Utilities --- diff --git a/docs/en/operations/utilities/odbc-bridge.md b/docs/en/operations/utilities/odbc-bridge.md index e5967085c49..789f382fde8 100644 --- a/docs/en/operations/utilities/odbc-bridge.md +++ b/docs/en/operations/utilities/odbc-bridge.md @@ -1,3 +1,6 @@ +--- +slug: /en/operations/utilities/odbc-bridge +--- # clickhouse-odbc-bridge Simple HTTP-server which works like a proxy for ODBC driver. The main motivation diff --git a/docs/en/sql-reference/aggregate-functions/combinators.md b/docs/en/sql-reference/aggregate-functions/combinators.md index d89dad1b94d..9fa3f7d7803 100644 --- a/docs/en/sql-reference/aggregate-functions/combinators.md +++ b/docs/en/sql-reference/aggregate-functions/combinators.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/combinators sidebar_position: 37 sidebar_label: Combinators --- diff --git a/docs/en/sql-reference/aggregate-functions/grouping_function.md b/docs/en/sql-reference/aggregate-functions/grouping_function.md new file mode 100644 index 00000000000..d17279eca66 --- /dev/null +++ b/docs/en/sql-reference/aggregate-functions/grouping_function.md @@ -0,0 +1,347 @@ +--- +slug: /en/sql-reference/aggregate-functions/grouping_function +--- + +# GROUPING + +## GROUPING + +[ROLLUP](../statements/select/group-by.md/#rollup-modifier) and [CUBE](../statements/select/group-by.md/#cube-modifier) are modifiers to GROUP BY. Both of these calculate subtotals. ROLLUP takes an ordered list of columns, for example `(day, month, year)`, and calculates subtotals at each level of the aggregation and then a grand total. CUBE calculates subtotals across all possible combinations of the columns specified. GROUPING identifies which rows returned by ROLLUP or CUBE are superaggregates, and which are rows that would be returned by an unmodified GROUP BY. + +The GROUPING function takes multiple columns as arguments and returns a bitmask. +- `1` indicates that a row returned by a `ROLLUP` or `CUBE` modifier to `GROUP BY` is a subtotal +- `0` indicates that a row returned by a `ROLLUP` or `CUBE` is not a subtotal + +A short sketch showing this bitmask in use appears just before the sample data below. + +## GROUPING SETS + +By default, the CUBE modifier calculates subtotals for all possible combinations of the columns passed to CUBE. GROUPING SETS allows you to specify exactly which combinations to calculate. + +Analyzing hierarchical data is a good use case for the ROLLUP, CUBE, and GROUPING SETS modifiers. The sample here is a table containing data about which Linux distribution, and which version of that distribution, is installed across two datacenters. It may be valuable to look at the data by distribution, version, and location.
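+ +As a quick illustration of that bitmask, here is a minimal sketch (it assumes the `servers` table created in the next section and the `1` = subtotal semantics described above): adding `GROUPING` to the select list of a `ROLLUP` query separates the subtotal rows from the plain `GROUP BY` rows. + +```sql +SELECT + datacenter, + distro, + SUM(quantity) qty, + -- 0 for a plain GROUP BY row; non-zero for a subtotal or grand-total row + GROUPING(datacenter, distro) AS is_subtotal +FROM + servers +GROUP BY + ROLLUP(datacenter, distro); +```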
+ +### Load sample data + +```sql +CREATE TABLE servers ( datacenter VARCHAR(255), + distro VARCHAR(255) NOT NULL, + version VARCHAR(50) NOT NULL, + quantity INT + ) + ENGINE = MergeTree -- a MergeTree-family engine is needed for the table-level ORDER BY + ORDER BY (datacenter, distro, version) +``` + +```sql +INSERT INTO servers(datacenter, distro, version, quantity) +VALUES ('Schenectady', 'Arch','2022.08.05',50), + ('Westport', 'Arch','2022.08.05',40), + ('Schenectady','Arch','2021.09.01',30), + ('Westport', 'Arch','2021.09.01',20), + ('Schenectady','Arch','2020.05.01',10), + ('Westport', 'Arch','2020.05.01',5), + ('Schenectady','RHEL','9',60), + ('Westport','RHEL','9',70), + ('Westport','RHEL','7',80), + ('Schenectady','RHEL','7',80) +``` + +```sql +SELECT + * +FROM + servers; +``` +```response +┌─datacenter──┬─distro─┬─version────┬─quantity─┐ +│ Schenectady │ Arch │ 2020.05.01 │ 10 │ +│ Schenectady │ Arch │ 2021.09.01 │ 30 │ +│ Schenectady │ Arch │ 2022.08.05 │ 50 │ +│ Schenectady │ RHEL │ 7 │ 80 │ +│ Schenectady │ RHEL │ 9 │ 60 │ +│ Westport │ Arch │ 2020.05.01 │ 5 │ +│ Westport │ Arch │ 2021.09.01 │ 20 │ +│ Westport │ Arch │ 2022.08.05 │ 40 │ +│ Westport │ RHEL │ 7 │ 80 │ +│ Westport │ RHEL │ 9 │ 70 │ +└─────────────┴────────┴────────────┴──────────┘ + +10 rows in set. Elapsed: 0.409 sec. +``` + +### Simple queries + +Get the count of servers in each data center by distribution: +```sql +SELECT + datacenter, + distro, + SUM (quantity) qty +FROM + servers +GROUP BY + datacenter, + distro; +``` +```response +┌─datacenter──┬─distro─┬─qty─┐ +│ Schenectady │ RHEL │ 140 │ +│ Westport │ Arch │ 65 │ +│ Schenectady │ Arch │ 90 │ +│ Westport │ RHEL │ 150 │ +└─────────────┴────────┴─────┘ + +4 rows in set. Elapsed: 0.212 sec. +``` + +Get the count of servers in each data center: +```sql +SELECT + datacenter, + SUM (quantity) qty +FROM + servers +GROUP BY + datacenter; +``` +```response +┌─datacenter──┬─qty─┐ +│ Westport │ 215 │ +│ Schenectady │ 230 │ +└─────────────┴─────┘ + +2 rows in set. Elapsed: 0.277 sec. +``` + +Get the count of servers by distribution: +```sql +SELECT + distro, + SUM (quantity) qty +FROM + servers +GROUP BY + distro; +``` + +```response + +┌─distro─┬─qty─┐ +│ Arch │ 155 │ +│ RHEL │ 290 │ +└────────┴─────┘ + +2 rows in set. Elapsed: 0.352 sec. +``` + +Get the total count of servers: +```sql +SELECT + SUM(quantity) qty +FROM + servers; +``` +```response +┌─qty─┐ +│ 445 │ +└─────┘ + +1 row in set. Elapsed: 0.244 sec. +``` + +### Comparing multiple GROUP BY statements with GROUPING SETS + +Breaking down the data without CUBE, ROLLUP, or GROUPING SETS: +```sql +SELECT + datacenter, + distro, + SUM (quantity) qty +FROM + servers +GROUP BY + datacenter, + distro +UNION ALL +SELECT + datacenter, + null, + SUM (quantity) qty +FROM + servers +GROUP BY + datacenter +UNION ALL +SELECT + null, + distro, + SUM (quantity) qty +FROM + servers +GROUP BY + distro +UNION ALL +SELECT + null, + null, + SUM(quantity) qty +FROM + servers; +``` +```response +┌─datacenter─┬─distro─┬─qty─┐ +│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 445 │ +└────────────┴────────┴─────┘ +┌─datacenter──┬─distro─┬─qty─┐ +│ Westport │ ᴺᵁᴸᴸ │ 215 │ +│ Schenectady │ ᴺᵁᴸᴸ │ 230 │ +└─────────────┴────────┴─────┘ +┌─datacenter──┬─distro─┬─qty─┐ +│ Schenectady │ RHEL │ 140 │ +│ Westport │ Arch │ 65 │ +│ Schenectady │ Arch │ 90 │ +│ Westport │ RHEL │ 150 │ +└─────────────┴────────┴─────┘ +┌─datacenter─┬─distro─┬─qty─┐ +│ ᴺᵁᴸᴸ │ Arch │ 155 │ +│ ᴺᵁᴸᴸ │ RHEL │ 290 │ +└────────────┴────────┴─────┘ + +9 rows in set. Elapsed: 0.527 sec.
+``` + +Getting the same information using GROUPING SETS: +```sql +SELECT + datacenter, + distro, + SUM (quantity) qty +FROM + servers +GROUP BY + GROUPING SETS( + (datacenter,distro), + (datacenter), + (distro), + () + ) +``` +```response +┌─datacenter──┬─distro─┬─qty─┐ +│ Schenectady │ RHEL │ 140 │ +│ Westport │ Arch │ 65 │ +│ Schenectady │ Arch │ 90 │ +│ Westport │ RHEL │ 150 │ +└─────────────┴────────┴─────┘ +┌─datacenter──┬─distro─┬─qty─┐ +│ Westport │ │ 215 │ +│ Schenectady │ │ 230 │ +└─────────────┴────────┴─────┘ +┌─datacenter─┬─distro─┬─qty─┐ +│ │ │ 445 │ +└────────────┴────────┴─────┘ +┌─datacenter─┬─distro─┬─qty─┐ +│ │ Arch │ 155 │ +│ │ RHEL │ 290 │ +└────────────┴────────┴─────┘ + +9 rows in set. Elapsed: 0.427 sec. +``` + +### Comparing CUBE with GROUPING SETS + +The CUBE in the next query, `CUBE(datacenter,distro,version)`, provides a hierarchy that may not make sense. It does not make sense to look at Version across the two distributions (as Arch and RHEL do not have the same release cycle or version naming standards). The GROUPING SETS example following this one is more appropriate, as it groups `distro` and `version` in the same set. + +```sql +SELECT + datacenter, + distro, + version, + SUM(quantity) +FROM + servers +GROUP BY + CUBE(datacenter,distro,version) +ORDER BY + datacenter, + distro; +``` +```response +┌─datacenter──┬─distro─┬─version────┬─sum(quantity)─┐ +│ │ │ 7 │ 160 │ +│ │ │ 2020.05.01 │ 15 │ +│ │ │ 2021.09.01 │ 50 │ +│ │ │ 2022.08.05 │ 90 │ +│ │ │ 9 │ 130 │ +│ │ │ │ 445 │ +│ │ Arch │ 2021.09.01 │ 50 │ +│ │ Arch │ 2022.08.05 │ 90 │ +│ │ Arch │ 2020.05.01 │ 15 │ +│ │ Arch │ │ 155 │ +│ │ RHEL │ 9 │ 130 │ +│ │ RHEL │ 7 │ 160 │ +│ │ RHEL │ │ 290 │ +│ Schenectady │ │ 9 │ 60 │ +│ Schenectady │ │ 2021.09.01 │ 30 │ +│ Schenectady │ │ 7 │ 80 │ +│ Schenectady │ │ 2022.08.05 │ 50 │ +│ Schenectady │ │ 2020.05.01 │ 10 │ +│ Schenectady │ │ │ 230 │ +│ Schenectady │ Arch │ 2022.08.05 │ 50 │ +│ Schenectady │ Arch │ 2021.09.01 │ 30 │ +│ Schenectady │ Arch │ 2020.05.01 │ 10 │ +│ Schenectady │ Arch │ │ 90 │ +│ Schenectady │ RHEL │ 7 │ 80 │ +│ Schenectady │ RHEL │ 9 │ 60 │ +│ Schenectady │ RHEL │ │ 140 │ +│ Westport │ │ 9 │ 70 │ +│ Westport │ │ 2020.05.01 │ 5 │ +│ Westport │ │ 2022.08.05 │ 40 │ +│ Westport │ │ 7 │ 80 │ +│ Westport │ │ 2021.09.01 │ 20 │ +│ Westport │ │ │ 215 │ +│ Westport │ Arch │ 2020.05.01 │ 5 │ +│ Westport │ Arch │ 2021.09.01 │ 20 │ +│ Westport │ Arch │ 2022.08.05 │ 40 │ +│ Westport │ Arch │ │ 65 │ +│ Westport │ RHEL │ 9 │ 70 │ +│ Westport │ RHEL │ 7 │ 80 │ +│ Westport │ RHEL │ │ 150 │ +└─────────────┴────────┴────────────┴───────────────┘ + +39 rows in set. Elapsed: 0.355 sec. +``` +:::note +Version in the above example may not make sense when it is not associated with a distro. If we were tracking the kernel version instead, it might make sense, because the kernel version can be associated with either distro. Using GROUPING SETS, as in the next example, may be a better choice.
+::: + +```sql +SELECT + datacenter, + distro, + version, + SUM(quantity) +FROM servers +GROUP BY + GROUPING SETS ( + (datacenter, distro, version), + (datacenter, distro)) +``` +```response +┌─datacenter──┬─distro─┬─version────┬─sum(quantity)─┐ +│ Westport │ RHEL │ 9 │ 70 │ +│ Schenectady │ Arch │ 2022.08.05 │ 50 │ +│ Schenectady │ Arch │ 2021.09.01 │ 30 │ +│ Schenectady │ RHEL │ 7 │ 80 │ +│ Westport │ Arch │ 2020.05.01 │ 5 │ +│ Westport │ RHEL │ 7 │ 80 │ +│ Westport │ Arch │ 2021.09.01 │ 20 │ +│ Westport │ Arch │ 2022.08.05 │ 40 │ +│ Schenectady │ RHEL │ 9 │ 60 │ +│ Schenectady │ Arch │ 2020.05.01 │ 10 │ +└─────────────┴────────┴────────────┴───────────────┘ +┌─datacenter──┬─distro─┬─version─┬─sum(quantity)─┐ +│ Schenectady │ RHEL │ │ 140 │ +│ Westport │ Arch │ │ 65 │ +│ Schenectady │ Arch │ │ 90 │ +│ Westport │ RHEL │ │ 150 │ +└─────────────┴────────┴─────────┴───────────────┘ + +14 rows in set. Elapsed: 1.036 sec. +``` diff --git a/docs/en/sql-reference/aggregate-functions/index.md b/docs/en/sql-reference/aggregate-functions/index.md index 2a13e9a0bae..3d6475979b7 100644 --- a/docs/en/sql-reference/aggregate-functions/index.md +++ b/docs/en/sql-reference/aggregate-functions/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/ sidebar_label: Aggregate Functions sidebar_position: 33 --- diff --git a/docs/en/sql-reference/aggregate-functions/parametric-functions.md b/docs/en/sql-reference/aggregate-functions/parametric-functions.md index 112f94e9261..40184c0aa02 100644 --- a/docs/en/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/en/sql-reference/aggregate-functions/parametric-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/parametric-functions sidebar_position: 38 sidebar_label: Parametric --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/any.md b/docs/en/sql-reference/aggregate-functions/reference/any.md index c0af7a2a59e..db19f524b31 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/any.md +++ b/docs/en/sql-reference/aggregate-functions/reference/any.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/any sidebar_position: 6 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md b/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md index ab19b145ddc..4288b66bb2c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md +++ b/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/anyheavy sidebar_position: 103 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/anylast.md b/docs/en/sql-reference/aggregate-functions/reference/anylast.md index 44697359405..351c9fd8e2f 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/anylast.md +++ b/docs/en/sql-reference/aggregate-functions/reference/anylast.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/anylast sidebar_position: 104 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/argmax.md b/docs/en/sql-reference/aggregate-functions/reference/argmax.md index 42da27e320f..7b99c831010 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/argmax.md +++ b/docs/en/sql-reference/aggregate-functions/reference/argmax.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/argmax sidebar_position: 106 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/argmin.md 
b/docs/en/sql-reference/aggregate-functions/reference/argmin.md index 2d3ad337d3e..945dda5e46d 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/argmin.md +++ b/docs/en/sql-reference/aggregate-functions/reference/argmin.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/argmin sidebar_position: 105 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/avg.md b/docs/en/sql-reference/aggregate-functions/reference/avg.md index 63702d56111..f935f4548ed 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/avg.md +++ b/docs/en/sql-reference/aggregate-functions/reference/avg.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/avg sidebar_position: 5 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md b/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md index dbcfd3e3071..00dffdc33d2 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/avgweighted sidebar_position: 107 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md b/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md index e497d72b519..57edb47950a 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md +++ b/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md @@ -1,9 +1,9 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/categoricalinformationvalue sidebar_position: 250 +title: categoricalInformationValue --- -# categoricalInformationValue - Calculates the value of `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` for each category. 
``` sql diff --git a/docs/en/sql-reference/aggregate-functions/reference/corr.md b/docs/en/sql-reference/aggregate-functions/reference/corr.md index df491968309..8fa493c9630 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/corr.md +++ b/docs/en/sql-reference/aggregate-functions/reference/corr.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/corr sidebar_position: 107 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md index 1d6080f683d..356f731ff16 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/count.md +++ b/docs/en/sql-reference/aggregate-functions/reference/count.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/count sidebar_position: 1 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/covarpop.md b/docs/en/sql-reference/aggregate-functions/reference/covarpop.md index 7858d53bbbd..579035b2fe1 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/covarpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/covarpop.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/covarpop sidebar_position: 36 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md b/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md index fb25aaa9602..4d29e6f1272 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/covarsamp sidebar_position: 37 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/deltasum.md b/docs/en/sql-reference/aggregate-functions/reference/deltasum.md index d1318c6d830..d5d9e9369a4 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/deltasum.md +++ b/docs/en/sql-reference/aggregate-functions/reference/deltasum.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/deltasum sidebar_position: 141 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md b/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md index f8283178c6e..6c62ee6b63f 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/deltasumtimestamp sidebar_position: 141 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/entropy.md b/docs/en/sql-reference/aggregate-functions/reference/entropy.md index b563dbb1b79..d86f4f4197a 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/entropy.md +++ b/docs/en/sql-reference/aggregate-functions/reference/entropy.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/entropy sidebar_position: 302 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md b/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md index 7db8f2b6ef2..7b1709e6d5c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md +++ b/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/exponentialmovingaverage sidebar_position: 108 --- diff --git 
a/docs/en/sql-reference/aggregate-functions/reference/grouparray.md b/docs/en/sql-reference/aggregate-functions/reference/grouparray.md index f1d9e60f778..18048fa4f71 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparray.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparray.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/grouparray sidebar_position: 110 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md b/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md index b5b3656860d..aafa643a972 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/grouparrayinsertat sidebar_position: 112 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md index 5c58c314577..8fa1939e7d3 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/grouparraymovingavg sidebar_position: 114 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md index 93dc4e5da47..a51857418c6 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/grouparraymovingsum sidebar_position: 113 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md index ca54d49d827..26c41c6636b 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/grouparraysample sidebar_position: 114 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md index f3b89d530af..f89e3796aaa 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/groupbitand sidebar_position: 125 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md index 39373c59aba..02b9e0e8821 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/groupbitmap sidebar_position: 128 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md index 377b81c4ebf..1e649645e75 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md +++ 
b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md @@ -1,9 +1,9 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/groupbitmapand sidebar_position: 129 +title: groupBitmapAnd --- -# groupBitmapAnd - Calculates the AND of a bitmap column and returns the cardinality as a UInt64 value; if the `-State` suffix is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md). ``` sql diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md index 7e3973a00f0..c88c80ceff2 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md @@ -1,9 +1,9 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/groupbitmapor sidebar_position: 130 +title: groupBitmapOr --- -# groupBitmapOr - Calculates the OR of a bitmap column and returns the cardinality as a UInt64 value; if the `-State` suffix is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md). This is equivalent to `groupBitmapMerge`. ``` sql diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md index 13548665c98..aa24b3d2128 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md @@ -1,9 +1,9 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/groupbitmapxor sidebar_position: 131 +title: groupBitmapXor --- -# groupBitmapXor - Calculates the XOR of a bitmap column and returns the cardinality as a UInt64 value; if the `-State` suffix is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md).
``` sql diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md index fc3569b3e98..75b34d9c5a3 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/groupbitor sidebar_position: 126 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md index 70f080827cc..ca6fb9f8352 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/groupbitxor sidebar_position: 127 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md b/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md index 65edbbdf3e9..fe5f714c307 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/groupuniqarray sidebar_position: 111 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md index 4854a19f475..ee17c37100c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/index.md +++ b/docs/en/sql-reference/aggregate-functions/reference/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/ toc_folder_title: Reference sidebar_position: 36 toc_hidden: true diff --git a/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md b/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md index 54469f3b56d..444ec0aec97 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md +++ b/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/intervalLengthSum sidebar_position: 146 sidebar_label: intervalLengthSum --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md b/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md index c21e780991c..e1a29973fcf 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/kurtpop sidebar_position: 153 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md b/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md index 601eebd6d9c..911c2bfbe74 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/kurtsamp sidebar_position: 154 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md b/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md index a9661fea8f9..452cf413857 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md @@ -1,4 +1,5 @@ --- +slug: 
/en/sql-reference/aggregate-functions/reference/mannwhitneyutest sidebar_position: 310 sidebar_label: mannWhitneyUTest --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/max.md b/docs/en/sql-reference/aggregate-functions/reference/max.md index 481e8a4a21b..13c185e2948 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/max.md +++ b/docs/en/sql-reference/aggregate-functions/reference/max.md @@ -1,9 +1,9 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/max sidebar_position: 3 +title: max --- -# max - Aggregate function that calculates the maximum across a group of values. Example: diff --git a/docs/en/sql-reference/aggregate-functions/reference/maxmap.md b/docs/en/sql-reference/aggregate-functions/reference/maxmap.md index 6f53d9f0ae0..ebb9d054476 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/maxmap.md +++ b/docs/en/sql-reference/aggregate-functions/reference/maxmap.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/maxmap sidebar_position: 143 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/meanztest.md b/docs/en/sql-reference/aggregate-functions/reference/meanztest.md index 0752df05818..6b43e2dfc2a 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/meanztest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/meanztest.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/meanztest sidebar_position: 303 sidebar_label: meanZTest --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/median.md b/docs/en/sql-reference/aggregate-functions/reference/median.md index 1c798f7bbf9..5ac3c6ef721 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/median.md +++ b/docs/en/sql-reference/aggregate-functions/reference/median.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/median sidebar_position: 212 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/min.md b/docs/en/sql-reference/aggregate-functions/reference/min.md index 7252494f5ca..cca515b76e8 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/min.md +++ b/docs/en/sql-reference/aggregate-functions/reference/min.md @@ -1,9 +1,9 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/min sidebar_position: 2 +title: min --- -## min - Aggregate function that calculates the minimum across a group of values. 
Example: diff --git a/docs/en/sql-reference/aggregate-functions/reference/minmap.md b/docs/en/sql-reference/aggregate-functions/reference/minmap.md index 61c7f4358b6..5436e1fc6a6 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/minmap.md +++ b/docs/en/sql-reference/aggregate-functions/reference/minmap.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/minmap sidebar_position: 142 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantile.md b/docs/en/sql-reference/aggregate-functions/reference/quantile.md index 99346a50b33..414574e00e6 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantile.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantile.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/quantile sidebar_position: 200 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md index 01fd1ea8fb2..6ca8c45d14e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/quantilebfloat16 sidebar_position: 209 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md b/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md index 50e0f089b72..26826afd126 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/quantiledeterministic sidebar_position: 206 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md index ba2fe5c1eaa..04fe597a34e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/quantileexact sidebar_position: 202 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md index 593ad3a0e4c..16e6438a3bf 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/quantileexactweighted sidebar_position: 203 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md index a38d3cb141e..5c9120fb8f4 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/quantiles sidebar_position: 201 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md index be06e562334..5da37a4832f 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md +++ 
b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/quantiletdigest sidebar_position: 207 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md index afde202dd15..e7abe08e39f 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/quantiletdigestweighted sidebar_position: 208 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md index 8bcdbbc23aa..ead381b4497 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/quantiletiming sidebar_position: 204 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md index 4b56423c7d9..f65c6b1e6ec 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/quantiletimingweighted sidebar_position: 205 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md b/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md index 3b8477340b6..231eb2b091b 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md +++ b/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/rankCorr sidebar_position: 145 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md b/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md index b6f7a94acad..a6380b78a79 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md +++ b/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/simplelinearregression sidebar_position: 220 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/skewpop.md b/docs/en/sql-reference/aggregate-functions/reference/skewpop.md index 87fa7e136f1..379fdcfa7c2 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/skewpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/skewpop.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/skewpop sidebar_position: 150 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md b/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md index cec74896deb..9e64b186db3 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/skewsamp sidebar_position: 151 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md 
b/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md index 16b3e2ddba0..2026d086375 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/sparkbar sidebar_position: 311 sidebar_label: sparkbar --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md b/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md index 015c0871dda..7ad7e37e5c2 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/stddevpop sidebar_position: 30 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md b/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md index 50dfa10a0d7..068725c4991 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/stddevsamp sidebar_position: 31 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md b/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md index f4a79fd588b..8126a80e25e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md +++ b/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/stochasticlinearregression sidebar_position: 221 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md b/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md index ea1cff0ddf8..41eeb70c04f 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md +++ b/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/stochasticlogisticregression sidebar_position: 222 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/studentttest.md b/docs/en/sql-reference/aggregate-functions/reference/studentttest.md index b4a86d15597..aec0635ed73 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/studentttest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/studentttest.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/studentttest sidebar_position: 300 sidebar_label: studentTTest --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/sum.md b/docs/en/sql-reference/aggregate-functions/reference/sum.md index 527dc15d5a7..320bb73f9ac 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sum.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sum.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/sum sidebar_position: 4 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/sumcount.md b/docs/en/sql-reference/aggregate-functions/reference/sumcount.md index c4a302b076a..c4c93a844b1 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sumcount.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sumcount.md @@ -1,4 +1,5 @@ --- +slug: 
/en/sql-reference/aggregate-functions/reference/sumcount sidebar_position: 144 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md b/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md index cbcb3362b64..d58b0d89348 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/sumkahan sidebar_position: 145 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/summap.md b/docs/en/sql-reference/aggregate-functions/reference/summap.md index 88af347c88f..1acfde3783a 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/summap.md +++ b/docs/en/sql-reference/aggregate-functions/reference/summap.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/summap sidebar_position: 141 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md b/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md index 9f8b4b4f577..be66340285b 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/sumwithoverflow sidebar_position: 140 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/topk.md b/docs/en/sql-reference/aggregate-functions/reference/topk.md index d968ca22b16..658cddf1e6e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/topk.md +++ b/docs/en/sql-reference/aggregate-functions/reference/topk.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/topk sidebar_position: 108 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md b/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md index daa5d05e99f..133de88a07e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/topkweighted sidebar_position: 109 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniq.md b/docs/en/sql-reference/aggregate-functions/reference/uniq.md index 942ad73dfd9..d72311b3ede 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniq.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniq.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/uniq sidebar_position: 190 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md index 652032eb575..f1287c6ff9b 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/uniqcombined sidebar_position: 192 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md index d2aa51954fe..9f010da57f2 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md @@ -1,4 +1,5 @@ --- +slug: 
/en/sql-reference/aggregate-functions/reference/uniqcombined64 sidebar_position: 193 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md b/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md index 9b3da4e317a..901c631b756 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/uniqexact sidebar_position: 191 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md index 5514eb692b7..b598ad23df3 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/uniqhll12 sidebar_position: 194 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md b/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md index ab3661f07d9..e2adf672909 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md +++ b/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md @@ -1,9 +1,9 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/uniqthetasketch sidebar_position: 195 +title: uniqTheta --- -# uniqTheta - Calculates the approximate number of different argument values, using the [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html). ``` sql diff --git a/docs/en/sql-reference/aggregate-functions/reference/varpop.md b/docs/en/sql-reference/aggregate-functions/reference/varpop.md index ec0d2b51185..0a665c83e74 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varpop.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/varpop sidebar_position: 32 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md index 9c0636ad1b4..76639d2d7a0 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/varsamp sidebar_position: 33 --- diff --git a/docs/en/sql-reference/aggregate-functions/reference/welchttest.md b/docs/en/sql-reference/aggregate-functions/reference/welchttest.md index 0a0278f970e..34f875e2138 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/welchttest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/welchttest.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/aggregate-functions/reference/welchttest sidebar_position: 301 sidebar_label: welchTTest --- diff --git a/docs/en/sql-reference/ansi.md b/docs/en/sql-reference/ansi.md index 7b307b5d1be..6ba7b16831e 100644 --- a/docs/en/sql-reference/ansi.md +++ b/docs/en/sql-reference/ansi.md @@ -1,10 +1,10 @@ --- +slug: /en/sql-reference/ansi sidebar_position: 40 sidebar_label: ANSI Compatibility +title: "ANSI SQL Compatibility of ClickHouse SQL Dialect" --- -# ANSI SQL Compatibility of ClickHouse SQL Dialect - :::note This article relies on Table 38, “Feature taxonomy and definition for mandatory features”, Annex F of [ISO/IEC CD 
9075-2:2011](https://www.iso.org/obp/ui/#iso:std:iso-iec:9075:-2:ed-4:v1:en:sec:8). ::: diff --git a/docs/en/sql-reference/data-types/aggregatefunction.md b/docs/en/sql-reference/data-types/aggregatefunction.md index 6220c6b2d6f..128a332fe13 100644 --- a/docs/en/sql-reference/data-types/aggregatefunction.md +++ b/docs/en/sql-reference/data-types/aggregatefunction.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/aggregatefunction sidebar_position: 53 sidebar_label: AggregateFunction --- diff --git a/docs/en/sql-reference/data-types/array.md b/docs/en/sql-reference/data-types/array.md index c0e9d217479..707acbda760 100644 --- a/docs/en/sql-reference/data-types/array.md +++ b/docs/en/sql-reference/data-types/array.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/array sidebar_position: 52 sidebar_label: Array(T) --- diff --git a/docs/en/sql-reference/data-types/boolean.md b/docs/en/sql-reference/data-types/boolean.md index 02e257bec43..7288f7cb993 100644 --- a/docs/en/sql-reference/data-types/boolean.md +++ b/docs/en/sql-reference/data-types/boolean.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/boolean sidebar_position: 43 sidebar_label: Boolean --- diff --git a/docs/en/sql-reference/data-types/date.md b/docs/en/sql-reference/data-types/date.md index e6aabb7aa79..d43a00312dd 100644 --- a/docs/en/sql-reference/data-types/date.md +++ b/docs/en/sql-reference/data-types/date.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/date sidebar_position: 47 sidebar_label: Date --- diff --git a/docs/en/sql-reference/data-types/date32.md b/docs/en/sql-reference/data-types/date32.md index b5a82128e69..ff1a745785b 100644 --- a/docs/en/sql-reference/data-types/date32.md +++ b/docs/en/sql-reference/data-types/date32.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/date32 sidebar_position: 48 sidebar_label: Date32 --- diff --git a/docs/en/sql-reference/data-types/datetime.md b/docs/en/sql-reference/data-types/datetime.md index cc58c33115d..85587882e01 100644 --- a/docs/en/sql-reference/data-types/datetime.md +++ b/docs/en/sql-reference/data-types/datetime.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/datetime sidebar_position: 48 sidebar_label: DateTime --- diff --git a/docs/en/sql-reference/data-types/datetime64.md b/docs/en/sql-reference/data-types/datetime64.md index a5a520a978e..c7372e4b064 100644 --- a/docs/en/sql-reference/data-types/datetime64.md +++ b/docs/en/sql-reference/data-types/datetime64.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/datetime64 sidebar_position: 49 sidebar_label: DateTime64 --- @@ -18,7 +19,9 @@ DateTime64(precision, [timezone]) Internally, stores data as a number of ‘ticks’ since epoch start (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store time zone that is the same for the entire column, that affects how the values of the `DateTime64` type values are displayed in text format and how the values specified as strings are parsed (‘2020-01-01 05:00:01.000’). The time zone is not stored in the rows of the table (or in resultset), but is stored in the column metadata. See details in [DateTime](../../sql-reference/data-types/datetime.md). -Supported range of values: \[1900-01-01 00:00:00, 2299-12-31 23:59:59.99999999\] (Note: The precision of the maximum value is 8). +Supported range of values: \[1900-01-01 00:00:00, 2299-12-31 23:59:59.99999999\] + +Note: The precision of the maximum value is 8. 
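The range stated above, and the nanosecond-precision ceiling noted just below, can be probed directly. The following query is an illustrative sketch; exact boundary behavior may vary between ClickHouse versions:

``` sql
-- Sketch: the documented DateTime64 endpoints at precision 8, and the highest
-- value representable when the maximum precision of 9 (nanoseconds) is used.
SELECT
    toDateTime64('1900-01-01 00:00:00', 8, 'UTC') AS range_min,
    toDateTime64('2299-12-31 23:59:59.99999999', 8, 'UTC') AS range_max_precision8,
    toDateTime64('2262-04-11 23:47:16', 9, 'UTC') AS range_max_precision9;
```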
If the maximum precision of 9 digits (nanoseconds) is used, the maximum supported value is `2262-04-11 23:47:16` in UTC. ## Examples diff --git a/docs/en/sql-reference/data-types/decimal.md b/docs/en/sql-reference/data-types/decimal.md index ddb1c091c7c..c11d5c879d7 100644 --- a/docs/en/sql-reference/data-types/decimal.md +++ b/docs/en/sql-reference/data-types/decimal.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/decimal sidebar_position: 42 sidebar_label: Decimal --- diff --git a/docs/en/sql-reference/data-types/domains/index.md b/docs/en/sql-reference/data-types/domains/index.md index 50599db2f47..4b705b3dcc2 100644 --- a/docs/en/sql-reference/data-types/domains/index.md +++ b/docs/en/sql-reference/data-types/domains/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/domains/ sidebar_position: 56 sidebar_label: Domains --- diff --git a/docs/en/sql-reference/data-types/domains/ipv4.md b/docs/en/sql-reference/data-types/domains/ipv4.md index 00d3a03ee29..25cc609f81c 100644 --- a/docs/en/sql-reference/data-types/domains/ipv4.md +++ b/docs/en/sql-reference/data-types/domains/ipv4.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/domains/ipv4 sidebar_position: 59 sidebar_label: IPv4 --- diff --git a/docs/en/sql-reference/data-types/domains/ipv6.md b/docs/en/sql-reference/data-types/domains/ipv6.md index 3863b085a14..5cea411f999 100644 --- a/docs/en/sql-reference/data-types/domains/ipv6.md +++ b/docs/en/sql-reference/data-types/domains/ipv6.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/domains/ipv6 sidebar_position: 60 sidebar_label: IPv6 --- diff --git a/docs/en/sql-reference/data-types/enum.md b/docs/en/sql-reference/data-types/enum.md index 5b975c83844..1aa641616fa 100644 --- a/docs/en/sql-reference/data-types/enum.md +++ b/docs/en/sql-reference/data-types/enum.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/enum sidebar_position: 50 sidebar_label: Enum --- diff --git a/docs/en/sql-reference/data-types/fixedstring.md b/docs/en/sql-reference/data-types/fixedstring.md index 444e6cbcd47..f7a01d5c6d4 100644 --- a/docs/en/sql-reference/data-types/fixedstring.md +++ b/docs/en/sql-reference/data-types/fixedstring.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/fixedstring sidebar_position: 45 sidebar_label: FixedString(N) --- diff --git a/docs/en/sql-reference/data-types/float.md b/docs/en/sql-reference/data-types/float.md index fbf1088b190..8bf2e4007da 100644 --- a/docs/en/sql-reference/data-types/float.md +++ b/docs/en/sql-reference/data-types/float.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/float sidebar_position: 41 sidebar_label: Float32, Float64 --- diff --git a/docs/en/sql-reference/data-types/geo.md b/docs/en/sql-reference/data-types/geo.md index 22fc56dbcf5..e5df01bab86 100644 --- a/docs/en/sql-reference/data-types/geo.md +++ b/docs/en/sql-reference/data-types/geo.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/geo sidebar_position: 62 sidebar_label: Geo --- diff --git a/docs/en/sql-reference/data-types/index.md b/docs/en/sql-reference/data-types/index.md index ca26b89ec87..7556481cf54 100644 --- a/docs/en/sql-reference/data-types/index.md +++ b/docs/en/sql-reference/data-types/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/ sidebar_label: Data Types sidebar_position: 37 --- diff --git a/docs/en/sql-reference/data-types/int-uint.md b/docs/en/sql-reference/data-types/int-uint.md index c63f6780154..030f749aa90 100644 --- a/docs/en/sql-reference/data-types/int-uint.md +++ 
b/docs/en/sql-reference/data-types/int-uint.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/int-uint sidebar_position: 40 sidebar_label: UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256 --- diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index 718e5279980..ab0f6115a41 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/json sidebar_position: 54 sidebar_label: JSON --- diff --git a/docs/en/sql-reference/data-types/lowcardinality.md b/docs/en/sql-reference/data-types/lowcardinality.md index 3bfe7b8a14e..71f82fc2587 100644 --- a/docs/en/sql-reference/data-types/lowcardinality.md +++ b/docs/en/sql-reference/data-types/lowcardinality.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/lowcardinality sidebar_position: 51 sidebar_label: LowCardinality --- diff --git a/docs/en/sql-reference/data-types/map.md b/docs/en/sql-reference/data-types/map.md index 65a0f9cbc52..54f6374929d 100644 --- a/docs/en/sql-reference/data-types/map.md +++ b/docs/en/sql-reference/data-types/map.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/map sidebar_position: 65 sidebar_label: Map(key, value) --- diff --git a/docs/en/sql-reference/data-types/multiword-types.md b/docs/en/sql-reference/data-types/multiword-types.md index 913f5325e6f..728bbcc1d9b 100644 --- a/docs/en/sql-reference/data-types/multiword-types.md +++ b/docs/en/sql-reference/data-types/multiword-types.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/multiword-types sidebar_position: 61 sidebar_label: Multiword Type Names --- diff --git a/docs/en/sql-reference/data-types/nested-data-structures/index.md b/docs/en/sql-reference/data-types/nested-data-structures/index.md index 90150f3acc2..001702f7904 100644 --- a/docs/en/sql-reference/data-types/nested-data-structures/index.md +++ b/docs/en/sql-reference/data-types/nested-data-structures/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/nested-data-structures/ sidebar_label: Nested Data Structures sidebar_position: 54 --- diff --git a/docs/en/sql-reference/data-types/nested-data-structures/nested.md b/docs/en/sql-reference/data-types/nested-data-structures/nested.md index b14025cdfaf..c85aca3e956 100644 --- a/docs/en/sql-reference/data-types/nested-data-structures/nested.md +++ b/docs/en/sql-reference/data-types/nested-data-structures/nested.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/nested-data-structures/nested sidebar_position: 57 sidebar_label: Nested(Name1 Type1, Name2 Type2, ...) 
--- diff --git a/docs/en/sql-reference/data-types/nullable.md b/docs/en/sql-reference/data-types/nullable.md index b9a75274c08..b4e803c28e5 100644 --- a/docs/en/sql-reference/data-types/nullable.md +++ b/docs/en/sql-reference/data-types/nullable.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/nullable sidebar_position: 55 sidebar_label: Nullable --- diff --git a/docs/en/sql-reference/data-types/simpleaggregatefunction.md b/docs/en/sql-reference/data-types/simpleaggregatefunction.md index 069e2e68671..1464b739224 100644 --- a/docs/en/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/en/sql-reference/data-types/simpleaggregatefunction.md @@ -1,3 +1,6 @@ +--- +slug: /en/sql-reference/data-types/simpleaggregatefunction +--- # SimpleAggregateFunction `SimpleAggregateFunction(name, types_of_arguments…)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data. diff --git a/docs/en/sql-reference/data-types/special-data-types/expression.md b/docs/en/sql-reference/data-types/special-data-types/expression.md index 0b9265eaa6e..64182d2c8ee 100644 --- a/docs/en/sql-reference/data-types/special-data-types/expression.md +++ b/docs/en/sql-reference/data-types/special-data-types/expression.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/special-data-types/expression sidebar_position: 58 sidebar_label: Expression --- diff --git a/docs/en/sql-reference/data-types/special-data-types/index.md b/docs/en/sql-reference/data-types/special-data-types/index.md index b2e5251d748..d30cd66da02 100644 --- a/docs/en/sql-reference/data-types/special-data-types/index.md +++ b/docs/en/sql-reference/data-types/special-data-types/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/special-data-types/ sidebar_label: Special Data Types sidebar_position: 55 --- diff --git a/docs/en/sql-reference/data-types/special-data-types/interval.md b/docs/en/sql-reference/data-types/special-data-types/interval.md index 6c2349d492c..5169bc646c9 100644 --- a/docs/en/sql-reference/data-types/special-data-types/interval.md +++ b/docs/en/sql-reference/data-types/special-data-types/interval.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/special-data-types/interval sidebar_position: 61 sidebar_label: Interval --- diff --git a/docs/en/sql-reference/data-types/special-data-types/nothing.md b/docs/en/sql-reference/data-types/special-data-types/nothing.md index d3164eab941..ada796f595c 100644 --- a/docs/en/sql-reference/data-types/special-data-types/nothing.md +++ b/docs/en/sql-reference/data-types/special-data-types/nothing.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/special-data-types/nothing sidebar_position: 60 sidebar_label: Nothing --- diff --git a/docs/en/sql-reference/data-types/special-data-types/set.md b/docs/en/sql-reference/data-types/special-data-types/set.md index 1490fd311ea..7ae41079a55 100644 --- a/docs/en/sql-reference/data-types/special-data-types/set.md +++ 
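The `SimpleAggregateFunction` paragraph above is easiest to see in DDL. A minimal sketch with a hypothetical table name; the column keeps only the running value of `sum`, not a full aggregation state:

``` sql
-- Hypothetical table: `val` stores the current value of sum() directly,
-- not an AggregateFunction state, so it can be read without -Merge combinators.
CREATE TABLE simple_agg
(
    id  UInt64,
    val SimpleAggregateFunction(sum, Double)
)
ENGINE = AggregatingMergeTree
ORDER BY id;
```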
b/docs/en/sql-reference/data-types/special-data-types/set.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/special-data-types/set sidebar_position: 59 sidebar_label: Set --- diff --git a/docs/en/sql-reference/data-types/string.md b/docs/en/sql-reference/data-types/string.md index db8a399df9d..57de19334b8 100644 --- a/docs/en/sql-reference/data-types/string.md +++ b/docs/en/sql-reference/data-types/string.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/string sidebar_position: 44 sidebar_label: String --- diff --git a/docs/en/sql-reference/data-types/tuple.md b/docs/en/sql-reference/data-types/tuple.md index 159fe9b5ee4..97b26f42603 100644 --- a/docs/en/sql-reference/data-types/tuple.md +++ b/docs/en/sql-reference/data-types/tuple.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/tuple sidebar_position: 54 sidebar_label: Tuple(T1, T2, ...) --- diff --git a/docs/en/sql-reference/data-types/uuid.md b/docs/en/sql-reference/data-types/uuid.md index 75485561f96..e329c5d4443 100644 --- a/docs/en/sql-reference/data-types/uuid.md +++ b/docs/en/sql-reference/data-types/uuid.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/data-types/uuid sidebar_position: 46 sidebar_label: UUID --- diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md index 6df7cf231b9..25eb05950b3 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical sidebar_position: 45 sidebar_label: Hierarchical dictionaries --- diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index 887d9ee4612..d1e87801c4a 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout sidebar_position: 41 sidebar_label: Storing Dictionaries in Memory --- diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index ab83017f263..433300eefa4 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime sidebar_position: 42 sidebar_label: Dictionary Updates --- diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md index 7fcea84b55d..ef1cc63aaa4 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon 
sidebar_position: 46 sidebar_label: Polygon Dictionaries With Grids --- diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 280dc1f54f4..d457f327e7a 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources sidebar_position: 43 sidebar_label: Sources of External Dictionaries --- diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index b159401ea45..895743c3b50 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure sidebar_position: 44 sidebar_label: Dictionary Key and Fields --- diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md index bb4fcdab51a..5c237eea8c7 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict sidebar_position: 40 sidebar_label: Configuring an External Dictionary --- diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md index 92c73ca5978..095fb6360cd 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts sidebar_position: 39 sidebar_label: General Description --- diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md index 2c96cc1916e..eccd1215e30 100644 --- a/docs/en/sql-reference/dictionaries/index.md +++ b/docs/en/sql-reference/dictionaries/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/dictionaries/ sidebar_label: Dictionaries sidebar_position: 35 --- diff --git a/docs/en/sql-reference/dictionaries/internal-dicts.md b/docs/en/sql-reference/dictionaries/internal-dicts.md index 3dd13631f08..dbc12a576f7 100644 --- a/docs/en/sql-reference/dictionaries/internal-dicts.md +++ b/docs/en/sql-reference/dictionaries/internal-dicts.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/dictionaries/internal-dicts sidebar_position: 39 sidebar_label: Internal Dictionaries --- diff --git a/docs/en/sql-reference/distributed-ddl.md b/docs/en/sql-reference/distributed-ddl.md index e12be4206a3..ff5155391be 100644 --- a/docs/en/sql-reference/distributed-ddl.md +++ b/docs/en/sql-reference/distributed-ddl.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/distributed-ddl sidebar_position: 3 sidebar_label: Distributed DDL --- diff --git a/docs/en/sql-reference/functions/arithmetic-functions.md 
b/docs/en/sql-reference/functions/arithmetic-functions.md index 45df5f7f227..9059facb0c6 100644 --- a/docs/en/sql-reference/functions/arithmetic-functions.md +++ b/docs/en/sql-reference/functions/arithmetic-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/arithmetic-functions sidebar_position: 34 sidebar_label: Arithmetic --- diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index e76317b9e47..c044b972754 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/array-functions sidebar_position: 35 sidebar_label: Arrays --- diff --git a/docs/en/sql-reference/functions/array-join.md b/docs/en/sql-reference/functions/array-join.md index 0eb1d1b431e..d6256ba2dc5 100644 --- a/docs/en/sql-reference/functions/array-join.md +++ b/docs/en/sql-reference/functions/array-join.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/array-join sidebar_position: 61 sidebar_label: arrayJoin --- diff --git a/docs/en/sql-reference/functions/bit-functions.md b/docs/en/sql-reference/functions/bit-functions.md index 1fde738476d..1648ce35056 100644 --- a/docs/en/sql-reference/functions/bit-functions.md +++ b/docs/en/sql-reference/functions/bit-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/bit-functions sidebar_position: 48 sidebar_label: Bit --- diff --git a/docs/en/sql-reference/functions/bitmap-functions.md b/docs/en/sql-reference/functions/bitmap-functions.md index 3250c10ff84..b760108402b 100644 --- a/docs/en/sql-reference/functions/bitmap-functions.md +++ b/docs/en/sql-reference/functions/bitmap-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/bitmap-functions sidebar_position: 49 sidebar_label: Bitmap --- diff --git a/docs/en/sql-reference/functions/comparison-functions.md b/docs/en/sql-reference/functions/comparison-functions.md index f3315fb08d9..5da010cdb84 100644 --- a/docs/en/sql-reference/functions/comparison-functions.md +++ b/docs/en/sql-reference/functions/comparison-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/comparison-functions sidebar_position: 36 sidebar_label: Comparison --- diff --git a/docs/en/sql-reference/functions/conditional-functions.md b/docs/en/sql-reference/functions/conditional-functions.md index 0e81a2159a3..ff1ac237025 100644 --- a/docs/en/sql-reference/functions/conditional-functions.md +++ b/docs/en/sql-reference/functions/conditional-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/conditional-functions sidebar_position: 43 sidebar_label: 'Conditional ' --- diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 7e2412340e7..3601208bb63 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/date-time-functions sidebar_position: 39 sidebar_label: Dates and Times --- @@ -266,8 +267,14 @@ Result: └────────────────┘ ``` -:::note -The return type `toStartOf*` functions described below is `Date` or `DateTime`. Though these functions can take `DateTime64` as an argument, passing them a `DateTime64` that is out of the normal range (years 1900 - 2299) will give an incorrect result. 
+:::warning
+The return type of the `toStartOf*`, `toLastDayOfMonth` and `toMonday` functions described below is `Date` or `DateTime`.
+Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results.
+If the argument is outside the normal range:
+ * If the argument is smaller than 1970, the result will be calculated from the argument `1970-01-01 (00:00:00)` instead.
+ * If the return type is `DateTime` and the argument is larger than `2106-02-07 08:28:15`, the result will be calculated from the argument `2106-02-07 08:28:15` instead.
+ * If the return type is `Date` and the argument is larger than `2149-06-06`, the result will be calculated from the argument `2149-06-06` instead.
+ * If `toLastDayOfMonth` is called with an argument greater than `2149-05-31`, the result will be calculated from the argument `2149-05-31` instead.
:::
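To make the clamping rules above concrete, a query along the following lines should behave as described. This is an illustrative sketch; the expected values are derived from the rules as stated, not from any particular build:

``` sql
-- 2299-12-31 is beyond the normal Date range, so per the rules above the
-- results should be computed from the clamped arguments instead.
SELECT
    toStartOfMonth(toDate32('2299-12-31')),   -- clamped to 2149-06-06 -> 2149-06-01
    toLastDayOfMonth(toDate32('2299-12-31')); -- clamped to 2149-05-31 -> 2149-05-31
```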
## toStartOfYear

@@ -291,20 +298,23 @@ Returns the date.

Rounds down a date or date with time to the first day of the month.
Returns the date.

-:::note
-The behavior of parsing incorrect dates is implementation specific. ClickHouse may return zero date, throw an exception or do “natural” overflow.
-:::
+## toLastDayOfMonth
+
+Rounds up a date or date with time to the last day of the month.
+Returns the date.

## toMonday

Rounds down a date or date with time to the nearest Monday.
+As a special case, date arguments `1970-01-01`, `1970-01-02`, `1970-01-03` and `1970-01-04` return date `1970-01-01`.
Returns the date.

## toStartOfWeek(t\[,mode\])

Rounds down a date or date with time to the nearest Sunday or Monday by mode.
Returns the date.
-The mode argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used.
+As a special case, date arguments `1970-01-01`, `1970-01-02`, `1970-01-03` and `1970-01-04` (and `1970-01-05` if `mode` is `1`) return date `1970-01-01`.
+The `mode` argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used.

## toStartOfDay

@@ -884,12 +894,85 @@ Result:

└──────────────────────┘
```

+## now64
+
+Returns the current date and time with sub-second precision at the moment of query analysis. The function is a constant expression.
+
+**Syntax**
+
+``` sql
+now64([scale], [timezone])
+```
+
+**Arguments**
+
+- `scale` - Tick size (precision): 10^(-scale) seconds. Valid range: [0 : 9]. Typical values are 3 (default, milliseconds), 6 (microseconds) and 9 (nanoseconds).
+- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- Current date and time with sub-second precision.
+
+Type: [Datetime64](../../sql-reference/data-types/datetime64.md).
+
+**Example**
+
+``` sql
+SELECT now64(), now64(9, 'Asia/Istanbul');
+```
+
+Result:
+
+``` text
+┌─────────────────now64()─┬─────now64(9, 'Asia/Istanbul')─┐
+│ 2022-08-21 19:34:26.196 │ 2022-08-21 22:34:26.196542766 │
+└─────────────────────────┴───────────────────────────────┘
+```
+
## nowInBlock

-Returns the current date and time at the moment of processing of each block of data. In contrast to the function `now`, it is not a constant expression, and the returned value will be different in different blocks for long-running queries.
+Returns the current date and time at the moment of processing of each block of data. In contrast to the function [now](#now), it is not a constant expression, and the returned value will be different in different blocks for long-running queries.
It makes sense to use this function to generate the current time in long-running INSERT SELECT queries.

+**Syntax**
+
+``` sql
+nowInBlock([timezone])
+```
+
+**Arguments**
+
+- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+- Current date and time at the moment of processing of each block of data.
+
+Type: [Datetime](../../sql-reference/data-types/datetime.md).
+
+**Example**
+
+``` sql
+SELECT
+    now(),
+    nowInBlock(),
+    sleep(1)
+FROM numbers(3)
+SETTINGS max_block_size = 1
+FORMAT PrettyCompactMonoBlock
+```
+
+Result:
+
+``` text
+┌───────────────now()─┬────────nowInBlock()─┬─sleep(1)─┐
+│ 2022-08-21 19:41:19 │ 2022-08-21 19:41:19 │        0 │
+│ 2022-08-21 19:41:19 │ 2022-08-21 19:41:20 │        0 │
+│ 2022-08-21 19:41:19 │ 2022-08-21 19:41:21 │        0 │
+└─────────────────────┴─────────────────────┴──────────┘
+```
+
## today

Accepts zero arguments and returns the current date at one of the moments of query analysis.

@@ -966,7 +1049,7 @@ Example:
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
-``` 
+```

``` text
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │

@@ -1068,7 +1151,10 @@ Query:

```sql
WITH toDateTime('2021-04-14 11:22:33') AS date_value
-SELECT dateName('year', date_value), dateName('month', date_value), dateName('day', date_value);
+SELECT
+    dateName('year', date_value),
+    dateName('month', date_value),
+    dateName('day', date_value);
```

Result:

@@ -1076,7 +1162,44 @@ Result:

```text
┌─dateName('year', date_value)─┬─dateName('month', date_value)─┬─dateName('day', date_value)─┐
│ 2021                         │ April                         │ 14                          │
-└──────────────────────────────┴───────────────────────────────┴─────────────────────────────
+└──────────────────────────────┴───────────────────────────────┴─────────────────────────────┘
+```
+
+## monthName
+
+Returns the name of the month.
+
+**Syntax**
+
+``` sql
+monthName(date)
+```
+
+**Arguments**
+
+- `date` — Date or date with time. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
+
+**Returned value**
+
+- The name of the month.
+ +Type: [String](../../sql-reference/data-types/string.md#string) + +**Example** + +Query: + +```sql +WITH toDateTime('2021-04-14 11:22:33') AS date_value +SELECT monthName(date_value); +``` + +Result: + +```text +┌─monthName(date_value)─┐ +│ April │ +└───────────────────────┘ ``` ## FROM\_UNIXTIME diff --git a/docs/en/sql-reference/functions/distance-functions.md b/docs/en/sql-reference/functions/distance-functions.md index 4af264f27ca..88d6c2f3e17 100644 --- a/docs/en/sql-reference/functions/distance-functions.md +++ b/docs/en/sql-reference/functions/distance-functions.md @@ -1,3 +1,6 @@ +--- +slug: /en/sql-reference/functions/distance-functions +--- # Distance functions ## L1Norm diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index 4ee71267a09..eb357df19db 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/encoding-functions sidebar_position: 52 sidebar_label: Encoding --- diff --git a/docs/en/sql-reference/functions/encryption-functions.md b/docs/en/sql-reference/functions/encryption-functions.md index 75f6cf18766..64e2a9128dc 100644 --- a/docs/en/sql-reference/functions/encryption-functions.md +++ b/docs/en/sql-reference/functions/encryption-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/encryption-functions sidebar_position: 67 sidebar_label: Encryption --- diff --git a/docs/en/sql-reference/functions/ext-dict-functions.md b/docs/en/sql-reference/functions/ext-dict-functions.md index a62445f82d1..728e26d6958 100644 --- a/docs/en/sql-reference/functions/ext-dict-functions.md +++ b/docs/en/sql-reference/functions/ext-dict-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/ext-dict-functions sidebar_position: 58 sidebar_label: External Dictionaries --- diff --git a/docs/en/sql-reference/functions/files.md b/docs/en/sql-reference/functions/files.md index b17981ed0e0..598dce86044 100644 --- a/docs/en/sql-reference/functions/files.md +++ b/docs/en/sql-reference/functions/files.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/files sidebar_position: 43 sidebar_label: Files --- diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md index 2c86aa403cd..1338806705f 100644 --- a/docs/en/sql-reference/functions/functions-for-nulls.md +++ b/docs/en/sql-reference/functions/functions-for-nulls.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/functions-for-nulls sidebar_position: 63 sidebar_label: Nullable --- diff --git a/docs/en/sql-reference/functions/geo/coordinates.md b/docs/en/sql-reference/functions/geo/coordinates.md index 6cc8137a2ff..9c68549283e 100644 --- a/docs/en/sql-reference/functions/geo/coordinates.md +++ b/docs/en/sql-reference/functions/geo/coordinates.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/geo/coordinates sidebar_label: Geographical Coordinates sidebar_position: 62 --- diff --git a/docs/en/sql-reference/functions/geo/geohash.md b/docs/en/sql-reference/functions/geo/geohash.md index a4f0328d0d0..d2a722cf5ab 100644 --- a/docs/en/sql-reference/functions/geo/geohash.md +++ b/docs/en/sql-reference/functions/geo/geohash.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/geo/geohash sidebar_label: Geohash --- diff --git a/docs/en/sql-reference/functions/geo/h3.md b/docs/en/sql-reference/functions/geo/h3.md index 
37a1a5857ea..d3b078cf05f 100644 --- a/docs/en/sql-reference/functions/geo/h3.md +++ b/docs/en/sql-reference/functions/geo/h3.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/geo/h3 sidebar_label: H3 Indexes --- diff --git a/docs/en/sql-reference/functions/geo/index.md b/docs/en/sql-reference/functions/geo/index.md index c0162cb5b63..be4071be1ee 100644 --- a/docs/en/sql-reference/functions/geo/index.md +++ b/docs/en/sql-reference/functions/geo/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/geo/ sidebar_label: Geo sidebar_position: 62 --- diff --git a/docs/en/sql-reference/functions/geo/s2.md b/docs/en/sql-reference/functions/geo/s2.md index 00b75ad42a7..ed3c66a0f6f 100644 --- a/docs/en/sql-reference/functions/geo/s2.md +++ b/docs/en/sql-reference/functions/geo/s2.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/geo/s2 sidebar_label: S2 Geometry --- diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md index ed48a590cd7..ad66047a92e 100644 --- a/docs/en/sql-reference/functions/hash-functions.md +++ b/docs/en/sql-reference/functions/hash-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/hash-functions sidebar_position: 50 sidebar_label: Hash --- diff --git a/docs/en/sql-reference/functions/in-functions.md b/docs/en/sql-reference/functions/in-functions.md index ffddf2c9009..67b6a13991e 100644 --- a/docs/en/sql-reference/functions/in-functions.md +++ b/docs/en/sql-reference/functions/in-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/in-functions sidebar_position: 60 sidebar_label: IN Operator --- diff --git a/docs/en/sql-reference/functions/index.md b/docs/en/sql-reference/functions/index.md index 2dadf91e069..840bcd583e4 100644 --- a/docs/en/sql-reference/functions/index.md +++ b/docs/en/sql-reference/functions/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/ sidebar_position: 32 sidebar_label: Functions --- diff --git a/docs/en/sql-reference/functions/introspection.md b/docs/en/sql-reference/functions/introspection.md index b885b50ce22..2fac1a1d693 100644 --- a/docs/en/sql-reference/functions/introspection.md +++ b/docs/en/sql-reference/functions/introspection.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/introspection sidebar_position: 65 sidebar_label: Introspection --- diff --git a/docs/en/sql-reference/functions/ip-address-functions.md b/docs/en/sql-reference/functions/ip-address-functions.md index 9b34a4db440..532bd9e47cf 100644 --- a/docs/en/sql-reference/functions/ip-address-functions.md +++ b/docs/en/sql-reference/functions/ip-address-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/ip-address-functions sidebar_position: 55 sidebar_label: IP Addresses --- diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md index b71926f7b56..71483896189 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/json-functions sidebar_position: 56 sidebar_label: JSON --- diff --git a/docs/en/sql-reference/functions/logical-functions.md b/docs/en/sql-reference/functions/logical-functions.md index 0dd0c8af146..137753d12c9 100644 --- a/docs/en/sql-reference/functions/logical-functions.md +++ b/docs/en/sql-reference/functions/logical-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/logical-functions sidebar_position: 37 sidebar_label: 
Logical --- diff --git a/docs/en/sql-reference/functions/machine-learning-functions.md b/docs/en/sql-reference/functions/machine-learning-functions.md index 92765a65849..98408ef459c 100644 --- a/docs/en/sql-reference/functions/machine-learning-functions.md +++ b/docs/en/sql-reference/functions/machine-learning-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/machine-learning-functions sidebar_position: 64 sidebar_label: Machine Learning --- diff --git a/docs/en/sql-reference/functions/math-functions.md b/docs/en/sql-reference/functions/math-functions.md index 8ea2935ed5d..430762a1885 100644 --- a/docs/en/sql-reference/functions/math-functions.md +++ b/docs/en/sql-reference/functions/math-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/math-functions sidebar_position: 44 sidebar_label: Mathematical --- diff --git a/docs/en/sql-reference/functions/nlp-functions.md b/docs/en/sql-reference/functions/nlp-functions.md index 6d1e894a456..073d79519ac 100644 --- a/docs/en/sql-reference/functions/nlp-functions.md +++ b/docs/en/sql-reference/functions/nlp-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/nlp-functions sidebar_position: 67 sidebar_label: NLP --- diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 6597cd2d6bf..d86eb6b45ae 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/other-functions sidebar_position: 67 sidebar_label: Other --- diff --git a/docs/en/sql-reference/functions/random-functions.md b/docs/en/sql-reference/functions/random-functions.md index 452be8a17b2..d77cc55e5eb 100644 --- a/docs/en/sql-reference/functions/random-functions.md +++ b/docs/en/sql-reference/functions/random-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/random-functions sidebar_position: 51 sidebar_label: Pseudo-Random Numbers --- diff --git a/docs/en/sql-reference/functions/rounding-functions.md b/docs/en/sql-reference/functions/rounding-functions.md index 37f1c35aa7e..4f90171119a 100644 --- a/docs/en/sql-reference/functions/rounding-functions.md +++ b/docs/en/sql-reference/functions/rounding-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/rounding-functions sidebar_position: 45 sidebar_label: Rounding --- diff --git a/docs/en/sql-reference/functions/splitting-merging-functions.md b/docs/en/sql-reference/functions/splitting-merging-functions.md index 3c6db8cc19e..70a1f10083b 100644 --- a/docs/en/sql-reference/functions/splitting-merging-functions.md +++ b/docs/en/sql-reference/functions/splitting-merging-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/splitting-merging-functions sidebar_position: 47 sidebar_label: Splitting and Merging Strings and Arrays --- diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index 55ab11763d5..6ce654496e4 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/string-functions sidebar_position: 40 sidebar_label: Strings --- diff --git a/docs/en/sql-reference/functions/string-replace-functions.md b/docs/en/sql-reference/functions/string-replace-functions.md index 294a7b49db5..adf2a07b732 100644 --- a/docs/en/sql-reference/functions/string-replace-functions.md +++ 
b/docs/en/sql-reference/functions/string-replace-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/string-replace-functions sidebar_position: 42 sidebar_label: For Replacing in Strings --- diff --git a/docs/en/sql-reference/functions/string-search-functions.md b/docs/en/sql-reference/functions/string-search-functions.md index 86408d8fc93..048d0864863 100644 --- a/docs/en/sql-reference/functions/string-search-functions.md +++ b/docs/en/sql-reference/functions/string-search-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/string-search-functions sidebar_position: 41 sidebar_label: For Searching in Strings --- diff --git a/docs/en/sql-reference/functions/time-window-functions.md b/docs/en/sql-reference/functions/time-window-functions.md index eea785e783e..f03a206da07 100644 --- a/docs/en/sql-reference/functions/time-window-functions.md +++ b/docs/en/sql-reference/functions/time-window-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/time-window-functions sidebar_position: 68 sidebar_label: Time Window --- diff --git a/docs/en/sql-reference/functions/tuple-functions.md b/docs/en/sql-reference/functions/tuple-functions.md index d3aac635841..006c0455dc0 100644 --- a/docs/en/sql-reference/functions/tuple-functions.md +++ b/docs/en/sql-reference/functions/tuple-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/tuple-functions sidebar_position: 66 sidebar_label: Tuples --- diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md index b6b7a057894..48e492caaff 100644 --- a/docs/en/sql-reference/functions/tuple-map-functions.md +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/tuple-map-functions sidebar_position: 46 sidebar_label: Working with maps --- diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index ecdf34bf7ee..d82728b9721 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/type-conversion-functions sidebar_position: 38 sidebar_label: Type Conversion --- diff --git a/docs/en/sql-reference/functions/url-functions.md b/docs/en/sql-reference/functions/url-functions.md index a46dda8269c..b03ca88fc61 100644 --- a/docs/en/sql-reference/functions/url-functions.md +++ b/docs/en/sql-reference/functions/url-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/url-functions sidebar_position: 54 sidebar_label: URLs --- diff --git a/docs/en/sql-reference/functions/uuid-functions.md b/docs/en/sql-reference/functions/uuid-functions.md index 78a5ffa36a1..b8f222c2e4e 100644 --- a/docs/en/sql-reference/functions/uuid-functions.md +++ b/docs/en/sql-reference/functions/uuid-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/uuid-functions sidebar_position: 53 sidebar_label: UUID --- diff --git a/docs/en/sql-reference/functions/ym-dict-functions.md b/docs/en/sql-reference/functions/ym-dict-functions.md index 06f278c6abc..04df3db571e 100644 --- a/docs/en/sql-reference/functions/ym-dict-functions.md +++ b/docs/en/sql-reference/functions/ym-dict-functions.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/functions/ym-dict-functions sidebar_position: 59 sidebar_label: Embedded Dictionaries --- diff --git a/docs/en/sql-reference/operators/exists.md 
b/docs/en/sql-reference/operators/exists.md index 2e9f6f58df5..4bc29389c9c 100644 --- a/docs/en/sql-reference/operators/exists.md +++ b/docs/en/sql-reference/operators/exists.md @@ -1,3 +1,6 @@ +--- +slug: /en/sql-reference/operators/exists +--- # EXISTS The `EXISTS` operator checks how many records are in the result of a subquery. If it is empty, then the operator returns `0`. Otherwise, it returns `1`. diff --git a/docs/en/sql-reference/operators/in.md b/docs/en/sql-reference/operators/in.md index 709570eac2d..58119cfc4f5 100644 --- a/docs/en/sql-reference/operators/in.md +++ b/docs/en/sql-reference/operators/in.md @@ -1,3 +1,6 @@ +--- +slug: /en/sql-reference/operators/in +--- # IN Operators The `IN`, `NOT IN`, `GLOBAL IN`, and `GLOBAL NOT IN` operators are covered separately, since their functionality is quite rich. diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md index 17b8f014366..0fe7ebbf4b6 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/operators/ sidebar_position: 38 sidebar_label: Operators --- diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index 9387b442944..210d4898c67 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/column sidebar_position: 37 sidebar_label: COLUMN --- diff --git a/docs/en/sql-reference/statements/alter/comment.md b/docs/en/sql-reference/statements/alter/comment.md index a6ec72221ff..f8742765619 100644 --- a/docs/en/sql-reference/statements/alter/comment.md +++ b/docs/en/sql-reference/statements/alter/comment.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/comment sidebar_position: 51 sidebar_label: COMMENT --- diff --git a/docs/en/sql-reference/statements/alter/constraint.md b/docs/en/sql-reference/statements/alter/constraint.md index 9f89a010a44..15bd27e1a95 100644 --- a/docs/en/sql-reference/statements/alter/constraint.md +++ b/docs/en/sql-reference/statements/alter/constraint.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/constraint sidebar_position: 43 sidebar_label: CONSTRAINT --- diff --git a/docs/en/sql-reference/statements/alter/delete.md b/docs/en/sql-reference/statements/alter/delete.md index 88ecf26961c..809715b5423 100644 --- a/docs/en/sql-reference/statements/alter/delete.md +++ b/docs/en/sql-reference/statements/alter/delete.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/delete sidebar_position: 39 sidebar_label: DELETE --- diff --git a/docs/en/sql-reference/statements/alter/index.md b/docs/en/sql-reference/statements/alter/index.md index e18e9e21a31..eeee5e03c8b 100644 --- a/docs/en/sql-reference/statements/alter/index.md +++ b/docs/en/sql-reference/statements/alter/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/ sidebar_position: 35 sidebar_label: ALTER --- diff --git a/docs/en/sql-reference/statements/alter/index/index.md b/docs/en/sql-reference/statements/alter/index/index.md index c9e7de22076..03d4bd47e71 100644 --- a/docs/en/sql-reference/statements/alter/index/index.md +++ b/docs/en/sql-reference/statements/alter/index/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/index toc_hidden_folder: true sidebar_position: 42 sidebar_label: INDEX diff --git a/docs/en/sql-reference/statements/alter/order-by.md 
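The `EXISTS` semantics described in the hunk above can be illustrated with the built-in `numbers` table function; a minimal sketch, where the expected results follow from the 0/1 rule as stated:

``` sql
SELECT EXISTS(SELECT number FROM numbers(10) WHERE number > 8);  -- subquery non-empty: returns 1
SELECT EXISTS(SELECT number FROM numbers(10) WHERE number > 11); -- subquery empty: returns 0
```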
b/docs/en/sql-reference/statements/alter/order-by.md index 1ffb6a3bbb3..8b51a868ace 100644 --- a/docs/en/sql-reference/statements/alter/order-by.md +++ b/docs/en/sql-reference/statements/alter/order-by.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/order-by sidebar_position: 41 sidebar_label: ORDER BY --- diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md index b7787fbef92..ec5285eaaad 100644 --- a/docs/en/sql-reference/statements/alter/partition.md +++ b/docs/en/sql-reference/statements/alter/partition.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/partition sidebar_position: 38 sidebar_label: PARTITION --- diff --git a/docs/en/sql-reference/statements/alter/projection.md b/docs/en/sql-reference/statements/alter/projection.md index 72a4b792fa4..7aacdb628e1 100644 --- a/docs/en/sql-reference/statements/alter/projection.md +++ b/docs/en/sql-reference/statements/alter/projection.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/projection sidebar_position: 49 sidebar_label: PROJECTION --- diff --git a/docs/en/sql-reference/statements/alter/quota.md b/docs/en/sql-reference/statements/alter/quota.md index c5f1bac0666..37e866a0b3e 100644 --- a/docs/en/sql-reference/statements/alter/quota.md +++ b/docs/en/sql-reference/statements/alter/quota.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/quota sidebar_position: 46 sidebar_label: QUOTA --- diff --git a/docs/en/sql-reference/statements/alter/role.md b/docs/en/sql-reference/statements/alter/role.md index 62a80ccaf50..2bee9fd0dc6 100644 --- a/docs/en/sql-reference/statements/alter/role.md +++ b/docs/en/sql-reference/statements/alter/role.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/role sidebar_position: 46 sidebar_label: ROLE --- diff --git a/docs/en/sql-reference/statements/alter/row-policy.md b/docs/en/sql-reference/statements/alter/row-policy.md index 0851c5b052d..ec0390e766e 100644 --- a/docs/en/sql-reference/statements/alter/row-policy.md +++ b/docs/en/sql-reference/statements/alter/row-policy.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/row-policy sidebar_position: 47 sidebar_label: ROW POLICY --- diff --git a/docs/en/sql-reference/statements/alter/sample-by.md b/docs/en/sql-reference/statements/alter/sample-by.md index d3490916b26..d824bdf7644 100644 --- a/docs/en/sql-reference/statements/alter/sample-by.md +++ b/docs/en/sql-reference/statements/alter/sample-by.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/sample-by sidebar_position: 41 sidebar_label: SAMPLE BY --- diff --git a/docs/en/sql-reference/statements/alter/setting.md b/docs/en/sql-reference/statements/alter/setting.md index da31da0cf53..f68f035146a 100644 --- a/docs/en/sql-reference/statements/alter/setting.md +++ b/docs/en/sql-reference/statements/alter/setting.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/setting sidebar_position: 38 sidebar_label: SETTING --- diff --git a/docs/en/sql-reference/statements/alter/settings-profile.md b/docs/en/sql-reference/statements/alter/settings-profile.md index 902f3854a12..234bb22ae14 100644 --- a/docs/en/sql-reference/statements/alter/settings-profile.md +++ b/docs/en/sql-reference/statements/alter/settings-profile.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/settings-profile sidebar_position: 48 sidebar_label: SETTINGS PROFILE --- diff --git a/docs/en/sql-reference/statements/alter/ttl.md 
b/docs/en/sql-reference/statements/alter/ttl.md index 2682279d1f7..3e9846ba1ab 100644 --- a/docs/en/sql-reference/statements/alter/ttl.md +++ b/docs/en/sql-reference/statements/alter/ttl.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/ttl sidebar_position: 44 sidebar_label: TTL --- diff --git a/docs/en/sql-reference/statements/alter/update.md b/docs/en/sql-reference/statements/alter/update.md index 26a2d57f050..e4fb872ae24 100644 --- a/docs/en/sql-reference/statements/alter/update.md +++ b/docs/en/sql-reference/statements/alter/update.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/update sidebar_position: 40 sidebar_label: UPDATE --- diff --git a/docs/en/sql-reference/statements/alter/user.md b/docs/en/sql-reference/statements/alter/user.md index 30b024227be..3a98955e439 100644 --- a/docs/en/sql-reference/statements/alter/user.md +++ b/docs/en/sql-reference/statements/alter/user.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/user sidebar_position: 45 sidebar_label: USER --- diff --git a/docs/en/sql-reference/statements/alter/view.md b/docs/en/sql-reference/statements/alter/view.md index 45bc2ab155a..e382cdace30 100644 --- a/docs/en/sql-reference/statements/alter/view.md +++ b/docs/en/sql-reference/statements/alter/view.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/alter/view sidebar_position: 50 sidebar_label: VIEW --- diff --git a/docs/en/sql-reference/statements/attach.md b/docs/en/sql-reference/statements/attach.md index e298df52409..36b62ba3109 100644 --- a/docs/en/sql-reference/statements/attach.md +++ b/docs/en/sql-reference/statements/attach.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/attach sidebar_position: 40 sidebar_label: ATTACH --- diff --git a/docs/en/sql-reference/statements/check-table.md b/docs/en/sql-reference/statements/check-table.md index 2e1a6c5b366..e2547d88b67 100644 --- a/docs/en/sql-reference/statements/check-table.md +++ b/docs/en/sql-reference/statements/check-table.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/check-table sidebar_position: 41 sidebar_label: CHECK --- diff --git a/docs/en/sql-reference/statements/create/database.md b/docs/en/sql-reference/statements/create/database.md index 5ce9d55a6b1..432f5975cc8 100644 --- a/docs/en/sql-reference/statements/create/database.md +++ b/docs/en/sql-reference/statements/create/database.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/database sidebar_position: 35 sidebar_label: DATABASE --- diff --git a/docs/en/sql-reference/statements/create/dictionary.md b/docs/en/sql-reference/statements/create/dictionary.md index 442d7bd8afd..7bf32b265f3 100644 --- a/docs/en/sql-reference/statements/create/dictionary.md +++ b/docs/en/sql-reference/statements/create/dictionary.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/dictionary sidebar_position: 38 sidebar_label: DICTIONARY --- diff --git a/docs/en/sql-reference/statements/create/function.md b/docs/en/sql-reference/statements/create/function.md index 0a452b6c4d2..63c006b1e3e 100644 --- a/docs/en/sql-reference/statements/create/function.md +++ b/docs/en/sql-reference/statements/create/function.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/function sidebar_position: 38 sidebar_label: FUNCTION --- diff --git a/docs/en/sql-reference/statements/create/index.md b/docs/en/sql-reference/statements/create/index.md index 666a2c66d2f..22d97545aa6 100644 --- a/docs/en/sql-reference/statements/create/index.md +++ 
b/docs/en/sql-reference/statements/create/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/ sidebar_position: 34 sidebar_label: CREATE --- diff --git a/docs/en/sql-reference/statements/create/quota.md b/docs/en/sql-reference/statements/create/quota.md index da6ce01aafd..04d6cdda6bf 100644 --- a/docs/en/sql-reference/statements/create/quota.md +++ b/docs/en/sql-reference/statements/create/quota.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/quota sidebar_position: 42 sidebar_label: QUOTA --- diff --git a/docs/en/sql-reference/statements/create/role.md b/docs/en/sql-reference/statements/create/role.md index d69aeb0976c..aba455030c2 100644 --- a/docs/en/sql-reference/statements/create/role.md +++ b/docs/en/sql-reference/statements/create/role.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/role sidebar_position: 40 sidebar_label: ROLE --- diff --git a/docs/en/sql-reference/statements/create/row-policy.md b/docs/en/sql-reference/statements/create/row-policy.md index c84c67f97f6..32f75c5a594 100644 --- a/docs/en/sql-reference/statements/create/row-policy.md +++ b/docs/en/sql-reference/statements/create/row-policy.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/row-policy sidebar_position: 41 sidebar_label: ROW POLICY --- diff --git a/docs/en/sql-reference/statements/create/settings-profile.md b/docs/en/sql-reference/statements/create/settings-profile.md index 0a3e1c0daf1..3086e076fba 100644 --- a/docs/en/sql-reference/statements/create/settings-profile.md +++ b/docs/en/sql-reference/statements/create/settings-profile.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/settings-profile sidebar_position: 43 sidebar_label: SETTINGS PROFILE --- diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index 0e033456998..6bc2aa66080 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/table sidebar_position: 36 sidebar_label: TABLE --- diff --git a/docs/en/sql-reference/statements/create/user.md b/docs/en/sql-reference/statements/create/user.md index 3837c60deb1..143099bbd4a 100644 --- a/docs/en/sql-reference/statements/create/user.md +++ b/docs/en/sql-reference/statements/create/user.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/user sidebar_position: 39 sidebar_label: USER tags: diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index b29b5be9b29..da68ca05bbb 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/create/view sidebar_position: 37 sidebar_label: VIEW --- diff --git a/docs/en/sql-reference/statements/describe-table.md b/docs/en/sql-reference/statements/describe-table.md index bc15e0e3062..344335f4907 100644 --- a/docs/en/sql-reference/statements/describe-table.md +++ b/docs/en/sql-reference/statements/describe-table.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/describe-table sidebar_position: 42 sidebar_label: DESCRIBE --- diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index 0265cb49f7e..1278c230794 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -1,4 +1,5 @@ --- +slug: 
/en/sql-reference/statements/detach sidebar_position: 43 sidebar_label: DETACH --- diff --git a/docs/en/sql-reference/statements/drop.md b/docs/en/sql-reference/statements/drop.md index 9621cd4944f..28d379421f1 100644 --- a/docs/en/sql-reference/statements/drop.md +++ b/docs/en/sql-reference/statements/drop.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/drop sidebar_position: 44 sidebar_label: DROP --- diff --git a/docs/en/sql-reference/statements/exchange.md b/docs/en/sql-reference/statements/exchange.md index 8bfb142e8e3..33f3e08d547 100644 --- a/docs/en/sql-reference/statements/exchange.md +++ b/docs/en/sql-reference/statements/exchange.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/exchange sidebar_position: 49 sidebar_label: EXCHANGE --- diff --git a/docs/en/sql-reference/statements/exists.md b/docs/en/sql-reference/statements/exists.md index 044bfb9a4b3..8195b34d71f 100644 --- a/docs/en/sql-reference/statements/exists.md +++ b/docs/en/sql-reference/statements/exists.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/exists sidebar_position: 45 sidebar_label: EXISTS --- diff --git a/docs/en/sql-reference/statements/explain.md b/docs/en/sql-reference/statements/explain.md index bc6d967e71a..e89c811ce30 100644 --- a/docs/en/sql-reference/statements/explain.md +++ b/docs/en/sql-reference/statements/explain.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/explain sidebar_position: 39 sidebar_label: EXPLAIN --- diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index c2395e83b7f..56bb4cd4b65 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/grant sidebar_position: 38 sidebar_label: GRANT --- diff --git a/docs/en/sql-reference/statements/index.md b/docs/en/sql-reference/statements/index.md index e0c080c13fd..bfb90f4a89f 100644 --- a/docs/en/sql-reference/statements/index.md +++ b/docs/en/sql-reference/statements/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/ sidebar_position: 1 sidebar_label: Statements --- diff --git a/docs/en/sql-reference/statements/insert-into.md b/docs/en/sql-reference/statements/insert-into.md index 194e5f1ea91..764ea9b0292 100644 --- a/docs/en/sql-reference/statements/insert-into.md +++ b/docs/en/sql-reference/statements/insert-into.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/insert-into sidebar_position: 33 sidebar_label: INSERT INTO --- diff --git a/docs/en/sql-reference/statements/kill.md b/docs/en/sql-reference/statements/kill.md index d7b32680abf..37fa821e369 100644 --- a/docs/en/sql-reference/statements/kill.md +++ b/docs/en/sql-reference/statements/kill.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/kill sidebar_position: 46 sidebar_label: KILL --- diff --git a/docs/en/sql-reference/statements/misc.md b/docs/en/sql-reference/statements/misc.md index 6b239fff75f..d812dd2008a 100644 --- a/docs/en/sql-reference/statements/misc.md +++ b/docs/en/sql-reference/statements/misc.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/misc toc_hidden: true sidebar_position: 70 --- diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md index 969289b8070..f40fe2cfd0a 100644 --- a/docs/en/sql-reference/statements/optimize.md +++ b/docs/en/sql-reference/statements/optimize.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/optimize sidebar_position: 47 sidebar_label: 
OPTIMIZE --- diff --git a/docs/en/sql-reference/statements/rename.md b/docs/en/sql-reference/statements/rename.md index a7f766efb08..cc33a7c41d4 100644 --- a/docs/en/sql-reference/statements/rename.md +++ b/docs/en/sql-reference/statements/rename.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/rename sidebar_position: 48 sidebar_label: RENAME --- diff --git a/docs/en/sql-reference/statements/revoke.md b/docs/en/sql-reference/statements/revoke.md index f3b13c2664a..7d63d1df445 100644 --- a/docs/en/sql-reference/statements/revoke.md +++ b/docs/en/sql-reference/statements/revoke.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/revoke sidebar_position: 39 sidebar_label: REVOKE --- diff --git a/docs/en/sql-reference/statements/select/all.md b/docs/en/sql-reference/statements/select/all.md index 06a7bbff16a..bb6c77a25c5 100644 --- a/docs/en/sql-reference/statements/select/all.md +++ b/docs/en/sql-reference/statements/select/all.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/all sidebar_label: ALL --- diff --git a/docs/en/sql-reference/statements/select/array-join.md b/docs/en/sql-reference/statements/select/array-join.md index d168f421609..4bed43a3301 100644 --- a/docs/en/sql-reference/statements/select/array-join.md +++ b/docs/en/sql-reference/statements/select/array-join.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/array-join sidebar_label: ARRAY JOIN --- diff --git a/docs/en/sql-reference/statements/select/distinct.md b/docs/en/sql-reference/statements/select/distinct.md index bb429dfbf4b..b2d940af3bb 100644 --- a/docs/en/sql-reference/statements/select/distinct.md +++ b/docs/en/sql-reference/statements/select/distinct.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/distinct sidebar_label: DISTINCT --- diff --git a/docs/en/sql-reference/statements/select/except.md b/docs/en/sql-reference/statements/select/except.md index e8cf4283b47..83bf0879213 100644 --- a/docs/en/sql-reference/statements/select/except.md +++ b/docs/en/sql-reference/statements/select/except.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/except sidebar_label: EXCEPT --- diff --git a/docs/en/sql-reference/statements/select/format.md b/docs/en/sql-reference/statements/select/format.md index d32770f04ce..29ff731af14 100644 --- a/docs/en/sql-reference/statements/select/format.md +++ b/docs/en/sql-reference/statements/select/format.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/format sidebar_label: FORMAT --- diff --git a/docs/en/sql-reference/statements/select/from.md b/docs/en/sql-reference/statements/select/from.md index f1fb04f6818..3013a173c16 100644 --- a/docs/en/sql-reference/statements/select/from.md +++ b/docs/en/sql-reference/statements/select/from.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/from sidebar_label: FROM --- diff --git a/docs/en/sql-reference/statements/select/group-by.md b/docs/en/sql-reference/statements/select/group-by.md index 1d6edc5fa3d..b5e194343ca 100644 --- a/docs/en/sql-reference/statements/select/group-by.md +++ b/docs/en/sql-reference/statements/select/group-by.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/group-by sidebar_label: GROUP BY --- diff --git a/docs/en/sql-reference/statements/select/having.md b/docs/en/sql-reference/statements/select/having.md index a5226d6ccab..85033096a5c 100644 --- a/docs/en/sql-reference/statements/select/having.md +++ b/docs/en/sql-reference/statements/select/having.md @@ -1,4 +1,5 @@ --- +slug: 
/en/sql-reference/statements/select/having sidebar_label: HAVING --- diff --git a/docs/en/sql-reference/statements/select/index.md b/docs/en/sql-reference/statements/select/index.md index e039548e50a..c1692bd9b29 100644 --- a/docs/en/sql-reference/statements/select/index.md +++ b/docs/en/sql-reference/statements/select/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/ sidebar_position: 32 sidebar_label: SELECT --- diff --git a/docs/en/sql-reference/statements/select/intersect.md b/docs/en/sql-reference/statements/select/intersect.md index 55204b7b0d8..d3b2b51b6be 100644 --- a/docs/en/sql-reference/statements/select/intersect.md +++ b/docs/en/sql-reference/statements/select/intersect.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/intersect sidebar_label: INTERSECT --- diff --git a/docs/en/sql-reference/statements/select/into-outfile.md b/docs/en/sql-reference/statements/select/into-outfile.md index 6e33673a3c0..a14b23f6689 100644 --- a/docs/en/sql-reference/statements/select/into-outfile.md +++ b/docs/en/sql-reference/statements/select/into-outfile.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/into-outfile sidebar_label: INTO OUTFILE --- diff --git a/docs/en/sql-reference/statements/select/join.md b/docs/en/sql-reference/statements/select/join.md index a36004566a5..1890ff081d8 100644 --- a/docs/en/sql-reference/statements/select/join.md +++ b/docs/en/sql-reference/statements/select/join.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/join sidebar_label: JOIN --- diff --git a/docs/en/sql-reference/statements/select/limit-by.md b/docs/en/sql-reference/statements/select/limit-by.md index 0433ea946cc..28f3d7e86d7 100644 --- a/docs/en/sql-reference/statements/select/limit-by.md +++ b/docs/en/sql-reference/statements/select/limit-by.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/limit-by sidebar_label: LIMIT BY --- diff --git a/docs/en/sql-reference/statements/select/limit.md b/docs/en/sql-reference/statements/select/limit.md index d7eb97c23f2..d61a5a44b58 100644 --- a/docs/en/sql-reference/statements/select/limit.md +++ b/docs/en/sql-reference/statements/select/limit.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/limit sidebar_label: LIMIT --- diff --git a/docs/en/sql-reference/statements/select/offset.md b/docs/en/sql-reference/statements/select/offset.md index ca9a438ec1f..109b29344f2 100644 --- a/docs/en/sql-reference/statements/select/offset.md +++ b/docs/en/sql-reference/statements/select/offset.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/offset sidebar_label: OFFSET --- diff --git a/docs/en/sql-reference/statements/select/order-by.md b/docs/en/sql-reference/statements/select/order-by.md index 0411147f18c..f69612f17fb 100644 --- a/docs/en/sql-reference/statements/select/order-by.md +++ b/docs/en/sql-reference/statements/select/order-by.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/order-by sidebar_label: ORDER BY --- diff --git a/docs/en/sql-reference/statements/select/prewhere.md b/docs/en/sql-reference/statements/select/prewhere.md index 49aa6ea894e..d0248790bfd 100644 --- a/docs/en/sql-reference/statements/select/prewhere.md +++ b/docs/en/sql-reference/statements/select/prewhere.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/prewhere sidebar_label: PREWHERE --- diff --git a/docs/en/sql-reference/statements/select/sample.md b/docs/en/sql-reference/statements/select/sample.md index 85c21f5e271..fb44d7c5a44 100644 
--- a/docs/en/sql-reference/statements/select/sample.md +++ b/docs/en/sql-reference/statements/select/sample.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/sample sidebar_label: SAMPLE --- diff --git a/docs/en/sql-reference/statements/select/union.md b/docs/en/sql-reference/statements/select/union.md index ea8c8bcb1e6..002aeaa4488 100644 --- a/docs/en/sql-reference/statements/select/union.md +++ b/docs/en/sql-reference/statements/select/union.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/union sidebar_label: UNION --- diff --git a/docs/en/sql-reference/statements/select/where.md b/docs/en/sql-reference/statements/select/where.md index e010c2dc913..a585942f07f 100644 --- a/docs/en/sql-reference/statements/select/where.md +++ b/docs/en/sql-reference/statements/select/where.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/where sidebar_label: WHERE --- diff --git a/docs/en/sql-reference/statements/select/with.md b/docs/en/sql-reference/statements/select/with.md index 4630e46cdec..689ae74f009 100644 --- a/docs/en/sql-reference/statements/select/with.md +++ b/docs/en/sql-reference/statements/select/with.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/select/with sidebar_label: WITH --- diff --git a/docs/en/sql-reference/statements/set-role.md b/docs/en/sql-reference/statements/set-role.md index 67b85fee9a2..0005ada3f8a 100644 --- a/docs/en/sql-reference/statements/set-role.md +++ b/docs/en/sql-reference/statements/set-role.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/set-role sidebar_position: 51 sidebar_label: SET ROLE --- diff --git a/docs/en/sql-reference/statements/set.md b/docs/en/sql-reference/statements/set.md index aa95eacd071..14f523adc3b 100644 --- a/docs/en/sql-reference/statements/set.md +++ b/docs/en/sql-reference/statements/set.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/set sidebar_position: 50 sidebar_label: SET --- diff --git a/docs/en/sql-reference/statements/show.md b/docs/en/sql-reference/statements/show.md index 6071c129c97..0721f17e9e2 100644 --- a/docs/en/sql-reference/statements/show.md +++ b/docs/en/sql-reference/statements/show.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/show sidebar_position: 37 sidebar_label: SHOW --- diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index 189c247f9fb..9b7527caaa9 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/system sidebar_position: 36 sidebar_label: SYSTEM --- diff --git a/docs/en/sql-reference/statements/truncate.md b/docs/en/sql-reference/statements/truncate.md index 7aff5f392bf..2fd004c8d60 100644 --- a/docs/en/sql-reference/statements/truncate.md +++ b/docs/en/sql-reference/statements/truncate.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/truncate sidebar_position: 52 sidebar_label: TRUNCATE --- diff --git a/docs/en/sql-reference/statements/use.md b/docs/en/sql-reference/statements/use.md index 508e1269537..5cc052268d4 100644 --- a/docs/en/sql-reference/statements/use.md +++ b/docs/en/sql-reference/statements/use.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/use sidebar_position: 53 sidebar_label: USE --- diff --git a/docs/en/sql-reference/statements/watch.md b/docs/en/sql-reference/statements/watch.md index 2db2c019f18..90d19e6be0e 100644 --- a/docs/en/sql-reference/statements/watch.md +++ 
b/docs/en/sql-reference/statements/watch.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/statements/watch sidebar_position: 53 sidebar_label: WATCH --- diff --git a/docs/en/sql-reference/syntax.md b/docs/en/sql-reference/syntax.md index 0b403ae2789..837022a424f 100644 --- a/docs/en/sql-reference/syntax.md +++ b/docs/en/sql-reference/syntax.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/syntax sidebar_position: 2 sidebar_label: Syntax --- diff --git a/docs/en/sql-reference/table-functions/cluster.md b/docs/en/sql-reference/table-functions/cluster.md index 11dd63e7f65..9da7a233e82 100644 --- a/docs/en/sql-reference/table-functions/cluster.md +++ b/docs/en/sql-reference/table-functions/cluster.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/cluster sidebar_position: 50 sidebar_label: cluster --- diff --git a/docs/en/sql-reference/table-functions/dictionary.md b/docs/en/sql-reference/table-functions/dictionary.md index b192498af66..2bdc2e580cd 100644 --- a/docs/en/sql-reference/table-functions/dictionary.md +++ b/docs/en/sql-reference/table-functions/dictionary.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/dictionary sidebar_position: 54 sidebar_label: dictionary function --- diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index e1d9eb73b73..a110bfbd15c 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/file sidebar_position: 37 sidebar_label: file --- diff --git a/docs/en/sql-reference/table-functions/generate.md b/docs/en/sql-reference/table-functions/generate.md index 8459d47899b..854d42480e9 100644 --- a/docs/en/sql-reference/table-functions/generate.md +++ b/docs/en/sql-reference/table-functions/generate.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/generate sidebar_position: 47 sidebar_label: generateRandom --- diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 49cc65cb87a..94d0b16b4f3 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/hdfs sidebar_position: 45 sidebar_label: hdfs --- diff --git a/docs/en/sql-reference/table-functions/hdfsCluster.md b/docs/en/sql-reference/table-functions/hdfsCluster.md index f8511d74bff..231c552610f 100644 --- a/docs/en/sql-reference/table-functions/hdfsCluster.md +++ b/docs/en/sql-reference/table-functions/hdfsCluster.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/hdfsCluster sidebar_position: 55 sidebar_label: hdfsCluster --- diff --git a/docs/en/sql-reference/table-functions/index.md b/docs/en/sql-reference/table-functions/index.md index 95c0d2f8494..d09adcd13d6 100644 --- a/docs/en/sql-reference/table-functions/index.md +++ b/docs/en/sql-reference/table-functions/index.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/ sidebar_label: Table Functions sidebar_position: 34 --- diff --git a/docs/en/sql-reference/table-functions/input.md b/docs/en/sql-reference/table-functions/input.md index bf9da0091a3..2640d108083 100644 --- a/docs/en/sql-reference/table-functions/input.md +++ b/docs/en/sql-reference/table-functions/input.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/input sidebar_position: 46 sidebar_label: input --- diff --git a/docs/en/sql-reference/table-functions/jdbc.md 
b/docs/en/sql-reference/table-functions/jdbc.md index d0111246d96..168df08fbd7 100644 --- a/docs/en/sql-reference/table-functions/jdbc.md +++ b/docs/en/sql-reference/table-functions/jdbc.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/jdbc sidebar_position: 43 sidebar_label: jdbc --- diff --git a/docs/en/sql-reference/table-functions/merge.md b/docs/en/sql-reference/table-functions/merge.md index 1597b7be98f..d83e088a613 100644 --- a/docs/en/sql-reference/table-functions/merge.md +++ b/docs/en/sql-reference/table-functions/merge.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/merge sidebar_position: 38 sidebar_label: merge --- diff --git a/docs/en/sql-reference/table-functions/mysql.md b/docs/en/sql-reference/table-functions/mysql.md index 60d95b17c4c..f867cda45bd 100644 --- a/docs/en/sql-reference/table-functions/mysql.md +++ b/docs/en/sql-reference/table-functions/mysql.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/mysql sidebar_position: 42 sidebar_label: mysql --- diff --git a/docs/en/sql-reference/table-functions/null.md b/docs/en/sql-reference/table-functions/null.md index 57a885afc54..cbe8480fbd8 100644 --- a/docs/en/sql-reference/table-functions/null.md +++ b/docs/en/sql-reference/table-functions/null.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/null sidebar_position: 53 sidebar_label: null function --- diff --git a/docs/en/sql-reference/table-functions/numbers.md b/docs/en/sql-reference/table-functions/numbers.md index a069afc3b58..f8598d10afb 100644 --- a/docs/en/sql-reference/table-functions/numbers.md +++ b/docs/en/sql-reference/table-functions/numbers.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/numbers sidebar_position: 39 sidebar_label: numbers --- diff --git a/docs/en/sql-reference/table-functions/odbc.md b/docs/en/sql-reference/table-functions/odbc.md index 71f36a3da1a..f8c46fe44d8 100644 --- a/docs/en/sql-reference/table-functions/odbc.md +++ b/docs/en/sql-reference/table-functions/odbc.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/odbc sidebar_position: 44 sidebar_label: odbc --- diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index b955b946d4e..367edbe9a00 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/postgresql sidebar_position: 42 sidebar_label: postgresql --- diff --git a/docs/en/sql-reference/table-functions/remote.md b/docs/en/sql-reference/table-functions/remote.md index 61018a3d5a7..ccaf9565144 100644 --- a/docs/en/sql-reference/table-functions/remote.md +++ b/docs/en/sql-reference/table-functions/remote.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/remote sidebar_position: 40 sidebar_label: remote --- diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 39446dbd512..2df7d6e46b3 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/s3 sidebar_position: 45 sidebar_label: s3 --- diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index 939aface0d7..ec6a73e4cbb 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -1,4 +1,5 @@ --- +slug: 
/en/sql-reference/table-functions/s3Cluster sidebar_position: 55 sidebar_label: s3Cluster --- diff --git a/docs/en/sql-reference/table-functions/sqlite.md b/docs/en/sql-reference/table-functions/sqlite.md index ff6ac64b382..789ab97c3c3 100644 --- a/docs/en/sql-reference/table-functions/sqlite.md +++ b/docs/en/sql-reference/table-functions/sqlite.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/sqlite sidebar_position: 55 sidebar_label: sqlite --- diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md index ebd793a5f3e..f1ed7b4dfe4 100644 --- a/docs/en/sql-reference/table-functions/url.md +++ b/docs/en/sql-reference/table-functions/url.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/url sidebar_position: 41 sidebar_label: url --- diff --git a/docs/en/sql-reference/table-functions/view.md b/docs/en/sql-reference/table-functions/view.md index e3b63cf5588..ee8da8209d1 100644 --- a/docs/en/sql-reference/table-functions/view.md +++ b/docs/en/sql-reference/table-functions/view.md @@ -1,4 +1,5 @@ --- +slug: /en/sql-reference/table-functions/view sidebar_position: 51 sidebar_label: view --- diff --git a/docs/en/sql-reference/window-functions/index.md b/docs/en/sql-reference/window-functions/index.md index d42828b74e2..4a23c6d66bc 100644 --- a/docs/en/sql-reference/window-functions/index.md +++ b/docs/en/sql-reference/window-functions/index.md @@ -1,10 +1,10 @@ --- +slug: /en/sql-reference/window-functions/ sidebar_position: 62 sidebar_label: Window Functions +title: Window Functions --- -# Window Functions - ClickHouse supports the standard grammar for defining windows and window functions. The following features are currently supported: | Feature | Support or workaround | @@ -14,7 +14,7 @@ ClickHouse supports the standard grammar for defining windows and window functio | `WINDOW` clause (`select ... from table window w as (partition by id)`) | supported | | `ROWS` frame | supported | | `RANGE` frame | supported, the default | -| `INTERVAL` syntax for `DateTime` `RANGE OFFSET` frame | not supported, specify the number of seconds instead | +| `INTERVAL` syntax for `DateTime` `RANGE OFFSET` frame | not supported, specify the number of seconds instead (`RANGE` works with any numeric type). | | `GROUPS` frame | not supported | | Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | all aggregate functions are supported | | `rank()`, `dense_rank()`, `row_number()` | supported | @@ -534,3 +534,56 @@ ORDER BY │ cpu_temp │ 2020-01-01 00:07:10 │ 87 │ 87 │ └──────────┴─────────────────────┴───────┴────────────────────────────┘ ``` + +### Moving / Sliding Average (per 10 days) + +Temperature is stored with second precision, but by using `Range` together with `ORDER BY toDate(ts)` we form a frame that is 10 units wide, and because of `toDate(ts)` each unit is one day.
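As the feature table above notes, `INTERVAL` syntax is not accepted in a `RANGE OFFSET` frame over `DateTime`: the offset is written as a plain number of seconds, since `RANGE` works with any numeric type. A minimal sketch of that workaround (the `events` table and its columns are hypothetical, not part of this changeset):

```sql
-- Hypothetical table: events(ts DateTime, value UInt32).
-- INTERVAL 1 HOUR is not supported in this frame, so the offset is written
-- as 3600 seconds; RANGE compares the DateTime key by its numeric value.
SELECT
    ts,
    value,
    sum(value) OVER (ORDER BY ts ASC RANGE BETWEEN 3600 PRECEDING AND CURRENT ROW) AS sum_last_hour
FROM events
ORDER BY ts ASC;
```

The same idea, with `toDate(ts)` switching the unit from seconds to days, drives the full 10-day moving-average example that follows.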
+ +```sql +CREATE TABLE sensors +( + `metric` String, + `ts` DateTime, + `value` Float +) +ENGINE = Memory; + +insert into sensors values('ambient_temp', '2020-01-01 00:00:00', 16), + ('ambient_temp', '2020-01-01 12:00:00', 16), + ('ambient_temp', '2020-01-02 11:00:00', 9), + ('ambient_temp', '2020-01-02 12:00:00', 9), + ('ambient_temp', '2020-02-01 10:00:00', 10), + ('ambient_temp', '2020-02-01 12:00:00', 10), + ('ambient_temp', '2020-02-10 12:00:00', 12), + ('ambient_temp', '2020-02-10 13:00:00', 12), + ('ambient_temp', '2020-02-20 12:00:01', 16), + ('ambient_temp', '2020-03-01 12:00:00', 16), + ('ambient_temp', '2020-03-01 12:00:00', 16), + ('ambient_temp', '2020-03-01 12:00:00', 16); + +SELECT + metric, + ts, + value, + round(avg(value) OVER (PARTITION BY metric ORDER BY toDate(ts) + Range BETWEEN 10 PRECEDING AND CURRENT ROW),2) moving_avg_10_days_temp +FROM sensors +ORDER BY + metric ASC, + ts ASC; + +┌─metric───────┬──────────────────ts─┬─value─┬─moving_avg_10_days_temp─┐ +│ ambient_temp │ 2020-01-01 00:00:00 │ 16 │ 16 │ +│ ambient_temp │ 2020-01-01 12:00:00 │ 16 │ 16 │ +│ ambient_temp │ 2020-01-02 11:00:00 │ 9 │ 12.5 │ +│ ambient_temp │ 2020-01-02 12:00:00 │ 9 │ 12.5 │ +│ ambient_temp │ 2020-02-01 10:00:00 │ 10 │ 10 │ +│ ambient_temp │ 2020-02-01 12:00:00 │ 10 │ 10 │ +│ ambient_temp │ 2020-02-10 12:00:00 │ 12 │ 11 │ +│ ambient_temp │ 2020-02-10 13:00:00 │ 12 │ 11 │ +│ ambient_temp │ 2020-02-20 12:00:01 │ 16 │ 13.33 │ +│ ambient_temp │ 2020-03-01 12:00:00 │ 16 │ 16 │ +│ ambient_temp │ 2020-03-01 12:00:00 │ 16 │ 16 │ +│ ambient_temp │ 2020-03-01 12:00:00 │ 16 │ 16 │ +└──────────────┴─────────────────────┴───────┴─────────────────────────┘ +``` diff --git a/docs/ru/about-us/_category_.yml b/docs/ru/about-us/_category_.yml new file mode 100644 index 00000000000..62abf8e9e43 --- /dev/null +++ b/docs/ru/about-us/_category_.yml @@ -0,0 +1,8 @@ +position: 80 +label: 'About Us' +collapsible: true +collapsed: true +link: + type: generated-index + title: About Us + slug: /ru/about-us/ diff --git a/docs/ru/about-us/adopters.mdx b/docs/ru/about-us/adopters.mdx new file mode 100644 index 00000000000..9e59d9edd70 --- /dev/null +++ b/docs/ru/about-us/adopters.mdx @@ -0,0 +1,9 @@ +--- +slug: /ru/about-us/adopters +sidebar_label: Adopters +title: ClickHouse Adopters +--- + +import Adopters from '@site/docs/en/about-us/adopters.md'; + +<Adopters /> diff --git a/docs/ru/about-us/support.mdx b/docs/ru/about-us/support.mdx new file mode 100644 index 00000000000..f1c94f15ef6 --- /dev/null +++ b/docs/ru/about-us/support.mdx @@ -0,0 +1,9 @@ +--- +slug: /ru/about-us/support +sidebar_label: Commercial Support +title: ClickHouse Commercial Support Service +--- + +import Support from '@site/docs/en/about-us/support.md'; + +<Support /> diff --git a/docs/ru/commercial/cloud.md b/docs/ru/commercial/cloud.md index 50a447e516e..5d47850af9d 100644 --- a/docs/ru/commercial/cloud.md +++ b/docs/ru/commercial/cloud.md @@ -1,4 +1,5 @@ --- +slug: /ru/commercial/cloud sidebar_position: 1 sidebar_label: "Поставщики облачных услуг ClickHouse" --- diff --git a/docs/ru/commercial/index.md b/docs/ru/commercial/index.md index 365db3ee91a..31deaae00ac 100644 --- a/docs/ru/commercial/index.md +++ b/docs/ru/commercial/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/commercial/ sidebar_position: 70 sidebar_label: "Коммерческие услуги" --- diff --git a/docs/ru/development/adding_test_queries.md b/docs/ru/development/adding_test_queries.md deleted file mode 120000 index def9c4077be..00000000000 --- a/docs/ru/development/adding_test_queries.md +++ /dev/null @@ -1
+0,0 @@ -../../en/development/adding_test_queries.md \ No newline at end of file diff --git a/docs/ru/development/adding_test_queries.mdx b/docs/ru/development/adding_test_queries.mdx new file mode 100644 index 00000000000..24149984724 --- /dev/null +++ b/docs/ru/development/adding_test_queries.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/development/adding_test_queries +sidebar_label: Adding Test Queries +sidebar_position: 63 +title: How to add test queries to ClickHouse CI +--- + +import Content from '@site/docs/en/development/adding_test_queries.md'; + +<Content /> diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md index 11eece9a960..a4b99987d03 100644 --- a/docs/ru/development/architecture.md +++ b/docs/ru/development/architecture.md @@ -1,4 +1,5 @@ --- +slug: /ru/development/architecture sidebar_position: 62 sidebar_label: "Обзор архитектуры ClickHouse" --- diff --git a/docs/ru/development/browse-code.md b/docs/ru/development/browse-code.md index 7290eed5c6f..640b1ac3693 100644 --- a/docs/ru/development/browse-code.md +++ b/docs/ru/development/browse-code.md @@ -1,4 +1,5 @@ --- +slug: /ru/development/browse-code sidebar_position: 72 sidebar_label: "Навигация по коду ClickHouse" --- diff --git a/docs/ru/development/build-cross-arm.md b/docs/ru/development/build-cross-arm.md deleted file mode 120000 index 134f128a40c..00000000000 --- a/docs/ru/development/build-cross-arm.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/build-cross-arm.md \ No newline at end of file diff --git a/docs/ru/development/build-cross-arm.mdx b/docs/ru/development/build-cross-arm.mdx new file mode 100644 index 00000000000..fd510716174 --- /dev/null +++ b/docs/ru/development/build-cross-arm.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/development/build-cross-arm +sidebar_position: 67 +title: How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture +sidebar_label: Build on Linux for AARCH64 (ARM64) +--- + +import Content from '@site/docs/en/development/build-cross-arm.md'; + +<Content /> diff --git a/docs/ru/development/build-cross-osx.md b/docs/ru/development/build-cross-osx.md deleted file mode 120000 index bcc10df574c..00000000000 --- a/docs/ru/development/build-cross-osx.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/build-cross-osx.md \ No newline at end of file diff --git a/docs/ru/development/build-cross-osx.mdx b/docs/ru/development/build-cross-osx.mdx new file mode 100644 index 00000000000..9a64c4abccd --- /dev/null +++ b/docs/ru/development/build-cross-osx.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/development/build-cross-osx +sidebar_position: 67 +sidebar_label: Build on Linux for Mac OS X +title: How to Build ClickHouse on Linux for Mac OS X +--- + +import Content from '@site/docs/en/development/build-cross-osx.md'; + +<Content /> diff --git a/docs/ru/development/build-cross-riscv.md b/docs/ru/development/build-cross-riscv.md deleted file mode 120000 index 7d1e8c46ed8..00000000000 --- a/docs/ru/development/build-cross-riscv.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/build-cross-riscv.md \ No newline at end of file diff --git a/docs/ru/development/build-cross-riscv.mdx b/docs/ru/development/build-cross-riscv.mdx new file mode 100644 index 00000000000..847717dacf3 --- /dev/null +++ b/docs/ru/development/build-cross-riscv.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/development/build-cross-riscv +sidebar_position: 68 +sidebar_label: Build on Linux for RISC-V 64 +title: Build on Linux for RISC-V 64 +--- + +import Content from '@site/docs/en/development/build-cross-riscv.md'; + +<Content /> diff --git
a/docs/ru/development/build-osx.md b/docs/ru/development/build-osx.md index 205edce2b78..82668a1ea55 100644 --- a/docs/ru/development/build-osx.md +++ b/docs/ru/development/build-osx.md @@ -1,5 +1,6 @@ --- -sidebar_position: 65 +slug: /ru/development/build-osx +sidebar_position: 66 sidebar_label: Сборка на Mac OS X --- diff --git a/docs/ru/development/build.md b/docs/ru/development/build.md deleted file mode 120000 index 480dbc2e9f5..00000000000 --- a/docs/ru/development/build.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/build.md \ No newline at end of file diff --git a/docs/ru/development/build.mdx b/docs/ru/development/build.mdx new file mode 100644 index 00000000000..5939429e610 --- /dev/null +++ b/docs/ru/development/build.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/development/build +sidebar_position: 65 +sidebar_label: Build on Linux +title: How to Build ClickHouse on Linux +--- + +import Content from '@site/docs/en/development/build.md'; + +<Content /> diff --git a/docs/ru/development/continuous-integration.md b/docs/ru/development/continuous-integration.md deleted file mode 120000 index f68058a436e..00000000000 --- a/docs/ru/development/continuous-integration.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/continuous-integration.md \ No newline at end of file diff --git a/docs/ru/development/continuous-integration.mdx b/docs/ru/development/continuous-integration.mdx new file mode 100644 index 00000000000..c15fd3fc54d --- /dev/null +++ b/docs/ru/development/continuous-integration.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/development/continuous-integration +sidebar_position: 62 +sidebar_label: Continuous Integration Checks +title: Continuous Integration Checks +--- + +import Content from '@site/docs/en/development/continuous-integration.md'; + +<Content /> diff --git a/docs/ru/development/contrib.md b/docs/ru/development/contrib.md index 1b99ec97553..f3a88a2da0c 100644 --- a/docs/ru/development/contrib.md +++ b/docs/ru/development/contrib.md @@ -1,4 +1,5 @@ --- +slug: /ru/development/contrib sidebar_position: 71 sidebar_label: "Используемые сторонние библиотеки" --- diff --git a/docs/ru/development/developer-instruction.md b/docs/ru/development/developer-instruction.md index 8e1320c4dcd..0713fe42f38 100644 --- a/docs/ru/development/developer-instruction.md +++ b/docs/ru/development/developer-instruction.md @@ -1,4 +1,5 @@ --- +slug: /ru/development/developer-instruction sidebar_position: 61 sidebar_label: "Инструкция для разработчиков" --- diff --git a/docs/ru/development/index.md b/docs/ru/development/index.md deleted file mode 120000 index 1e2ad97dcc5..00000000000 --- a/docs/ru/development/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/index.md \ No newline at end of file diff --git a/docs/ru/development/integrating_rust_libraries.md b/docs/ru/development/integrating_rust_libraries.md index 076d32a892e..aad83f61c14 100644 --- a/docs/ru/development/integrating_rust_libraries.md +++ b/docs/ru/development/integrating_rust_libraries.md @@ -1,3 +1,7 @@ +--- +sidebar_position: 98 +slug: /ru/development/integrating_rust_libraries +--- # Интеграция библиотек на языке Rust в ClickHouse. Интеграция библиотек будет описываться на основе работы проведенной для библиотеки BLAKE3. @@ -59,4 +63,4 @@ pub unsafe extern "C" fn blake3_apply_shim( В заключение, стоит отметить пару проблем, возникших при интеграции BLAKE3: 1) Некоторые архитектуры могут потребовать настройки компиляции в build.rs и в build_rust_lib.cmake в связи со своими особенностями.
-2) MemorySanitizer плохо понимает инициализацию памяти в Rust, поэтому для избежания ложноположительных срабатываний для BLAKE3 был создан альтернативный метод, который более явно, но при этом медленнее, инициализировал память. Он компилируется только для сборки с MemorySanitizer и в релиз не попадает. Вероятно, возможны и более красивые способы решения этой проблемы, но при интеграции BLAKE3 они не были обнаружены. \ No newline at end of file +2) MemorySanitizer плохо понимает инициализацию памяти в Rust, поэтому для избежания ложноположительных срабатываний для BLAKE3 был создан альтернативный метод, который более явно, но при этом медленнее, инициализировал память. Он компилируется только для сборки с MemorySanitizer и в релиз не попадает. Вероятно, возможны и более красивые способы решения этой проблемы, но при интеграции BLAKE3 они не были обнаружены. diff --git a/docs/ru/development/style.md b/docs/ru/development/style.md index 8388ce16809..6098dc9c13b 100644 --- a/docs/ru/development/style.md +++ b/docs/ru/development/style.md @@ -1,4 +1,5 @@ --- +slug: /ru/development/style sidebar_position: 69 sidebar_label: "Как писать код на C++" --- diff --git a/docs/ru/development/tests.md b/docs/ru/development/tests.md deleted file mode 120000 index c03d36c3916..00000000000 --- a/docs/ru/development/tests.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/tests.md \ No newline at end of file diff --git a/docs/ru/development/tests.mdx b/docs/ru/development/tests.mdx new file mode 100644 index 00000000000..3f44771d754 --- /dev/null +++ b/docs/ru/development/tests.mdx @@ -0,0 +1,10 @@ +--- +sidebar_position: 99 +slug: /ru/development/tests +sidebar_label: Testing +title: ClickHouse Testing +--- + +import Content from '@site/docs/en/development/tests.md'; + +<Content /> diff --git a/docs/ru/engines/database-engines/atomic.md b/docs/ru/engines/database-engines/atomic.md index a1523a47fe9..2b999b3bd3d 100644 --- a/docs/ru/engines/database-engines/atomic.md +++ b/docs/ru/engines/database-engines/atomic.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/database-engines/atomic sidebar_position: 32 sidebar_label: Atomic --- diff --git a/docs/ru/engines/database-engines/index.md b/docs/ru/engines/database-engines/index.md index a1a69336c24..d1df709822c 100644 --- a/docs/ru/engines/database-engines/index.md +++ b/docs/ru/engines/database-engines/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/database-engines/ sidebar_position: 27 sidebar_label: "Движки баз данных" --- diff --git a/docs/ru/engines/database-engines/lazy.md b/docs/ru/engines/database-engines/lazy.md index b718c22db97..b43ab5da029 100644 --- a/docs/ru/engines/database-engines/lazy.md +++ b/docs/ru/engines/database-engines/lazy.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/database-engines/lazy sidebar_position: 31 sidebar_label: Lazy --- diff --git a/docs/ru/engines/database-engines/materialized-mysql.md b/docs/ru/engines/database-engines/materialized-mysql.md index 5fca9783deb..55f62628a3d 100644 --- a/docs/ru/engines/database-engines/materialized-mysql.md +++ b/docs/ru/engines/database-engines/materialized-mysql.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/database-engines/materialized-mysql sidebar_position: 29 sidebar_label: "[experimental] MaterializedMySQL" --- diff --git a/docs/ru/engines/database-engines/materialized-postgresql.md b/docs/ru/engines/database-engines/materialized-postgresql.md index e5c8ddb11c0..f2268e9ba0f 100644 --- a/docs/ru/engines/database-engines/materialized-postgresql.md +++
b/docs/ru/engines/database-engines/materialized-postgresql.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/database-engines/materialized-postgresql sidebar_position: 30 sidebar_label: MaterializedPostgreSQL --- diff --git a/docs/ru/engines/database-engines/mysql.md b/docs/ru/engines/database-engines/mysql.md index aa38d1e3059..fb5c9d16ee4 100644 --- a/docs/ru/engines/database-engines/mysql.md +++ b/docs/ru/engines/database-engines/mysql.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/database-engines/mysql sidebar_position: 30 sidebar_label: MySQL --- diff --git a/docs/ru/engines/database-engines/postgresql.md b/docs/ru/engines/database-engines/postgresql.md index 324b39117d5..799a24ab1ec 100644 --- a/docs/ru/engines/database-engines/postgresql.md +++ b/docs/ru/engines/database-engines/postgresql.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/database-engines/postgresql sidebar_position: 35 sidebar_label: PostgreSQL --- diff --git a/docs/ru/engines/database-engines/replicated.md b/docs/ru/engines/database-engines/replicated.md index fef78ad6ee8..05e38e774fd 100644 --- a/docs/ru/engines/database-engines/replicated.md +++ b/docs/ru/engines/database-engines/replicated.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/database-engines/replicated sidebar_position: 36 sidebar_label: Replicated --- diff --git a/docs/ru/engines/database-engines/sqlite.md b/docs/ru/engines/database-engines/sqlite.md index 01da41c3590..f84ff2ea05f 100644 --- a/docs/ru/engines/database-engines/sqlite.md +++ b/docs/ru/engines/database-engines/sqlite.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/database-engines/sqlite sidebar_position: 32 sidebar_label: SQLite --- diff --git a/docs/ru/engines/table-engines/index.md b/docs/ru/engines/table-engines/index.md index 0596771973d..f8b66cb5f60 100644 --- a/docs/ru/engines/table-engines/index.md +++ b/docs/ru/engines/table-engines/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/ sidebar_label: "Движки таблиц" sidebar_position: 26 --- diff --git a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md index 9cbe4c730da..0529e6d698d 100644 --- a/docs/ru/engines/table-engines/integrations/ExternalDistributed.md +++ b/docs/ru/engines/table-engines/integrations/ExternalDistributed.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/ExternalDistributed sidebar_position: 12 sidebar_label: ExternalDistributed --- diff --git a/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md b/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md index 15504118d5f..62f544a76c8 100644 --- a/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/embedded-rocksdb sidebar_position: 9 sidebar_label: EmbeddedRocksDB --- diff --git a/docs/ru/engines/table-engines/integrations/hdfs.md b/docs/ru/engines/table-engines/integrations/hdfs.md index 84f31c0afcc..aed90d53f2a 100644 --- a/docs/ru/engines/table-engines/integrations/hdfs.md +++ b/docs/ru/engines/table-engines/integrations/hdfs.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/hdfs sidebar_position: 6 sidebar_label: HDFS --- diff --git a/docs/ru/engines/table-engines/integrations/index.md b/docs/ru/engines/table-engines/integrations/index.md index eb03fa213cd..58473cbb467 100644 --- a/docs/ru/engines/table-engines/integrations/index.md +++ b/docs/ru/engines/table-engines/integrations/index.md 
@@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/ sidebar_label: "Движки таблиц для интеграции" sidebar_position: 30 --- diff --git a/docs/ru/engines/table-engines/integrations/jdbc.md b/docs/ru/engines/table-engines/integrations/jdbc.md index 497e902c4d1..484426ce30d 100644 --- a/docs/ru/engines/table-engines/integrations/jdbc.md +++ b/docs/ru/engines/table-engines/integrations/jdbc.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/jdbc sidebar_position: 3 sidebar_label: JDBC --- diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index b51a0113302..37fc902e777 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/kafka sidebar_position: 8 sidebar_label: Kafka --- diff --git a/docs/ru/engines/table-engines/integrations/materialized-postgresql.md b/docs/ru/engines/table-engines/integrations/materialized-postgresql.md index 3a1caad1956..bc10066f6a6 100644 --- a/docs/ru/engines/table-engines/integrations/materialized-postgresql.md +++ b/docs/ru/engines/table-engines/integrations/materialized-postgresql.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/materialized-postgresql sidebar_position: 12 sidebar_label: MaterializedPostgreSQL --- diff --git a/docs/ru/engines/table-engines/integrations/mongodb.md b/docs/ru/engines/table-engines/integrations/mongodb.md index 03a9c08a7d8..84f4c9f167c 100644 --- a/docs/ru/engines/table-engines/integrations/mongodb.md +++ b/docs/ru/engines/table-engines/integrations/mongodb.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/mongodb sidebar_position: 5 sidebar_label: MongoDB --- diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md index 7380d926f90..481f9b71af9 100644 --- a/docs/ru/engines/table-engines/integrations/mysql.md +++ b/docs/ru/engines/table-engines/integrations/mysql.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/mysql sidebar_position: 4 sidebar_label: MySQL --- diff --git a/docs/ru/engines/table-engines/integrations/odbc.md b/docs/ru/engines/table-engines/integrations/odbc.md index 3e1a762bba8..17d58a23230 100644 --- a/docs/ru/engines/table-engines/integrations/odbc.md +++ b/docs/ru/engines/table-engines/integrations/odbc.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/odbc sidebar_position: 2 sidebar_label: ODBC --- diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md index 28debaf9c23..acf139f415a 100644 --- a/docs/ru/engines/table-engines/integrations/postgresql.md +++ b/docs/ru/engines/table-engines/integrations/postgresql.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/postgresql sidebar_position: 11 sidebar_label: PostgreSQL --- diff --git a/docs/ru/engines/table-engines/integrations/rabbitmq.md b/docs/ru/engines/table-engines/integrations/rabbitmq.md index 7322f23fe0e..8d21da0a20c 100644 --- a/docs/ru/engines/table-engines/integrations/rabbitmq.md +++ b/docs/ru/engines/table-engines/integrations/rabbitmq.md @@ -1,3 +1,6 @@ +--- +slug: /ru/engines/table-engines/integrations/rabbitmq +--- # RabbitMQ {#rabbitmq-engine} Движок работает с [RabbitMQ](https://www.rabbitmq.com). 
diff --git a/docs/ru/engines/table-engines/integrations/s3.md b/docs/ru/engines/table-engines/integrations/s3.md index 7411a336302..24735a35382 100644 --- a/docs/ru/engines/table-engines/integrations/s3.md +++ b/docs/ru/engines/table-engines/integrations/s3.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/s3 sidebar_position: 4 sidebar_label: S3 --- diff --git a/docs/ru/engines/table-engines/integrations/sqlite.md b/docs/ru/engines/table-engines/integrations/sqlite.md index 2aa5fbe3cf3..4d9581515a8 100644 --- a/docs/ru/engines/table-engines/integrations/sqlite.md +++ b/docs/ru/engines/table-engines/integrations/sqlite.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/integrations/sqlite sidebar_position: 7 sidebar_label: SQLite --- diff --git a/docs/ru/engines/table-engines/log-family/index.md b/docs/ru/engines/table-engines/log-family/index.md index e9793838766..9887a850dac 100644 --- a/docs/ru/engines/table-engines/log-family/index.md +++ b/docs/ru/engines/table-engines/log-family/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/log-family/ sidebar_label: "Семейство Log" sidebar_position: 29 --- diff --git a/docs/ru/engines/table-engines/log-family/log.md b/docs/ru/engines/table-engines/log-family/log.md index 72e954f7664..297d7db6f0f 100644 --- a/docs/ru/engines/table-engines/log-family/log.md +++ b/docs/ru/engines/table-engines/log-family/log.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/log-family/log sidebar_position: 33 sidebar_label: Log --- diff --git a/docs/ru/engines/table-engines/log-family/stripelog.md b/docs/ru/engines/table-engines/log-family/stripelog.md index 49bc23be517..726df3c16fb 100644 --- a/docs/ru/engines/table-engines/log-family/stripelog.md +++ b/docs/ru/engines/table-engines/log-family/stripelog.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/log-family/stripelog sidebar_position: 32 sidebar_label: StripeLog --- diff --git a/docs/ru/engines/table-engines/log-family/tinylog.md b/docs/ru/engines/table-engines/log-family/tinylog.md index 678a634fff8..55ed587fee8 100644 --- a/docs/ru/engines/table-engines/log-family/tinylog.md +++ b/docs/ru/engines/table-engines/log-family/tinylog.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/log-family/tinylog sidebar_position: 34 sidebar_label: TinyLog --- diff --git a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md index 2a802e70400..aa16113192e 100644 --- a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/mergetree-family/aggregatingmergetree sidebar_position: 35 sidebar_label: AggregatingMergeTree --- diff --git a/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md index 94b91830900..ecaaa6b8417 100644 --- a/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/mergetree-family/collapsingmergetree sidebar_position: 36 sidebar_label: CollapsingMergeTree --- diff --git a/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md index e30e771c4df..8b9a02c5a7b 100644 --- 
a/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/mergetree-family/custom-partitioning-key sidebar_position: 32 sidebar_label: "Произвольный ключ партиционирования" --- diff --git a/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md b/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md index 2546092a4a5..818f85f7e37 100644 --- a/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/mergetree-family/graphitemergetree sidebar_position: 38 sidebar_label: GraphiteMergeTree --- diff --git a/docs/ru/engines/table-engines/mergetree-family/index.md b/docs/ru/engines/table-engines/mergetree-family/index.md index e1924a33255..ff55c9ef224 100644 --- a/docs/ru/engines/table-engines/mergetree-family/index.md +++ b/docs/ru/engines/table-engines/mergetree-family/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/mergetree-family/ sidebar_label: MergeTree Family sidebar_position: 28 --- diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index e1e1b0b4268..e01e0006b87 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/mergetree-family/mergetree sidebar_position: 30 sidebar_label: MergeTree --- diff --git a/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md index 1b0c5ffd103..86626d92863 100644 --- a/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/mergetree-family/replacingmergetree sidebar_position: 33 sidebar_label: ReplacingMergeTree --- @@ -28,19 +29,65 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Описание параметров запроса смотрите в [описании запроса](../../../engines/table-engines/mergetree-family/replacingmergetree.md). - :::note "Внимание" - Уникальность строк определяется `ORDER BY` секцией таблицы, а не `PRIMARY KEY`. - ::: -**Параметры ReplacingMergeTree** +:::warning "Внимание" +Уникальность строк определяется `ORDER BY` секцией таблицы, а не `PRIMARY KEY`. +::: -- `ver` — столбец с номером версии. Тип `UInt*`, `Date`, `DateTime` или `DateTime64`. Необязательный параметр. +## Параметры ReplacingMergeTree - При слиянии `ReplacingMergeTree` оставляет только строку для каждого уникального ключа сортировки: +### ver + +`ver` — столбец с номером версии. Тип `UInt*`, `Date`, `DateTime` или `DateTime64`. Необязательный параметр. + +При слиянии `ReplacingMergeTree` оставляет только строку для каждого уникального ключа сортировки: - Последнюю в выборке, если `ver` не задан. Под выборкой здесь понимается набор строк в наборе кусков данных, участвующих в слиянии. Последний по времени создания кусок (последняя вставка) будет последним в выборке. Таким образом, после дедупликации для каждого значения ключа сортировки останется самая последняя строка из самой последней вставки. - С максимальной версией, если `ver` задан. 
Если `ver` одинаковый у нескольких строк, то для них действует то же правило, что и при незаданном `ver`: в результате слияния останется самая последняя строка из самой последней вставки. -**Секции запроса** +Пример: + +```sql +-- without ver - the last inserted 'wins' +CREATE TABLE myFirstReplacingMT +( + `key` Int64, + `someCol` String, + `eventTime` DateTime +) +ENGINE = ReplacingMergeTree +ORDER BY key; + +INSERT INTO myFirstReplacingMT Values (1, 'first', '2020-01-01 01:01:01'); +INSERT INTO myFirstReplacingMT Values (1, 'second', '2020-01-01 00:00:00'); + +SELECT * FROM myFirstReplacingMT FINAL; + +┌─key─┬─someCol─┬───────────eventTime─┐ +│ 1 │ second │ 2020-01-01 00:00:00 │ +└─────┴─────────┴─────────────────────┘ + + +-- with ver - the row with the biggest ver 'wins' +CREATE TABLE mySecondReplacingMT +( + `key` Int64, + `someCol` String, + `eventTime` DateTime +) +ENGINE = ReplacingMergeTree(eventTime) +ORDER BY key; + +INSERT INTO mySecondReplacingMT Values (1, 'first', '2020-01-01 01:01:01'); +INSERT INTO mySecondReplacingMT Values (1, 'second', '2020-01-01 00:00:00'); + +SELECT * FROM mySecondReplacingMT FINAL; + +┌─key─┬─someCol─┬───────────eventTime─┐ +│ 1 │ first │ 2020-01-01 01:01:01 │ +└─────┴─────────┴─────────────────────┘ +``` + +## Секции запроса При создании таблицы `ReplacingMergeTree` используются те же [секции](mergetree.md), что и при создании таблицы `MergeTree`. @@ -48,9 +95,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Устаревший способ создания таблицы - :::note "Внимание" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. - ::: +:::warning "Внимание" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ, описанный выше.
+::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( diff --git a/docs/ru/engines/table-engines/mergetree-family/replication.md b/docs/ru/engines/table-engines/mergetree-family/replication.md index 3de59e61e6e..22cb2196ef1 100644 --- a/docs/ru/engines/table-engines/mergetree-family/replication.md +++ b/docs/ru/engines/table-engines/mergetree-family/replication.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/mergetree-family/replication sidebar_position: 31 sidebar_label: "Репликация данных" --- diff --git a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md index 27af647ee36..0d9d268fa46 100644 --- a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/mergetree-family/summingmergetree sidebar_position: 34 sidebar_label: SummingMergeTree --- diff --git a/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md index a27a2ea99b5..4613b174389 100644 --- a/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree sidebar_position: 37 sidebar_label: VersionedCollapsingMergeTree --- diff --git a/docs/ru/engines/table-engines/special/buffer.md b/docs/ru/engines/table-engines/special/buffer.md index 8682f408b5f..354d2e3cac3 100644 --- a/docs/ru/engines/table-engines/special/buffer.md +++ b/docs/ru/engines/table-engines/special/buffer.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/buffer sidebar_position: 45 sidebar_label: Buffer --- diff --git a/docs/ru/engines/table-engines/special/dictionary.md b/docs/ru/engines/table-engines/special/dictionary.md index 1dbc5468e29..6e8282a7da1 100644 --- a/docs/ru/engines/table-engines/special/dictionary.md +++ b/docs/ru/engines/table-engines/special/dictionary.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/dictionary sidebar_position: 35 sidebar_label: Dictionary --- diff --git a/docs/ru/engines/table-engines/special/distributed.md b/docs/ru/engines/table-engines/special/distributed.md index 522929f0441..341acf92803 100644 --- a/docs/ru/engines/table-engines/special/distributed.md +++ b/docs/ru/engines/table-engines/special/distributed.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/distributed sidebar_position: 33 sidebar_label: Distributed --- diff --git a/docs/ru/engines/table-engines/special/external-data.md b/docs/ru/engines/table-engines/special/external-data.md index 8a25a66be0a..95ae1aa9059 100644 --- a/docs/ru/engines/table-engines/special/external-data.md +++ b/docs/ru/engines/table-engines/special/external-data.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/external-data sidebar_position: 45 sidebar_label: "Внешние данные для обработки запроса" --- diff --git a/docs/ru/engines/table-engines/special/file.md b/docs/ru/engines/table-engines/special/file.md index b0b74e70cf8..7c6ea8ebd4e 100644 --- a/docs/ru/engines/table-engines/special/file.md +++ b/docs/ru/engines/table-engines/special/file.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/file sidebar_position: 37 sidebar_label: File --- diff --git 
a/docs/ru/engines/table-engines/special/generate.md b/docs/ru/engines/table-engines/special/generate.md deleted file mode 120000 index 04af4b109a7..00000000000 --- a/docs/ru/engines/table-engines/special/generate.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table-engines/special/generate.md \ No newline at end of file diff --git a/docs/ru/engines/table-engines/special/generate.mdx b/docs/ru/engines/table-engines/special/generate.mdx new file mode 100644 index 00000000000..ab42752adf2 --- /dev/null +++ b/docs/ru/engines/table-engines/special/generate.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/engines/table-engines/special/generate +sidebar_position: 140 +sidebar_label: GenerateRandom +title: "GenerateRandom Table Engine" +--- + +import Content from '@site/docs/en/engines/table-engines/special/generate.md'; + + diff --git a/docs/ru/engines/table-engines/special/index.md b/docs/ru/engines/table-engines/special/index.md index 3039f038150..f6a3161dc5f 100644 --- a/docs/ru/engines/table-engines/special/index.md +++ b/docs/ru/engines/table-engines/special/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/ sidebar_label: "Специальные движки таблиц" sidebar_position: 31 --- diff --git a/docs/ru/engines/table-engines/special/join.md b/docs/ru/engines/table-engines/special/join.md index fb9afb8fa7c..b9df18ecffa 100644 --- a/docs/ru/engines/table-engines/special/join.md +++ b/docs/ru/engines/table-engines/special/join.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/join sidebar_position: 40 sidebar_label: Join --- diff --git a/docs/ru/engines/table-engines/special/materializedview.md b/docs/ru/engines/table-engines/special/materializedview.md index 9500d6197e7..bc9b44f35db 100644 --- a/docs/ru/engines/table-engines/special/materializedview.md +++ b/docs/ru/engines/table-engines/special/materializedview.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/materializedview sidebar_position: 43 sidebar_label: MaterializedView --- diff --git a/docs/ru/engines/table-engines/special/memory.md b/docs/ru/engines/table-engines/special/memory.md index 9f0eff34b7c..008b5e4ba0b 100644 --- a/docs/ru/engines/table-engines/special/memory.md +++ b/docs/ru/engines/table-engines/special/memory.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/memory sidebar_position: 44 sidebar_label: Memory --- diff --git a/docs/ru/engines/table-engines/special/merge.md b/docs/ru/engines/table-engines/special/merge.md index 2d40ada8b40..ea37654d8c6 100644 --- a/docs/ru/engines/table-engines/special/merge.md +++ b/docs/ru/engines/table-engines/special/merge.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/merge sidebar_position: 36 sidebar_label: Merge --- diff --git a/docs/ru/engines/table-engines/special/null.md b/docs/ru/engines/table-engines/special/null.md index 869a304cd5e..ec28df7f570 100644 --- a/docs/ru/engines/table-engines/special/null.md +++ b/docs/ru/engines/table-engines/special/null.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/null sidebar_position: 38 sidebar_label: 'Null' --- diff --git a/docs/ru/engines/table-engines/special/set.md b/docs/ru/engines/table-engines/special/set.md index e1a189e844a..bb1afbdf4f7 100644 --- a/docs/ru/engines/table-engines/special/set.md +++ b/docs/ru/engines/table-engines/special/set.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/set sidebar_position: 39 sidebar_label: Set --- diff --git a/docs/ru/engines/table-engines/special/url.md 
b/docs/ru/engines/table-engines/special/url.md index 05cff259794..6173a6851d6 100644 --- a/docs/ru/engines/table-engines/special/url.md +++ b/docs/ru/engines/table-engines/special/url.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/url sidebar_position: 41 sidebar_label: URL --- diff --git a/docs/ru/engines/table-engines/special/view.md b/docs/ru/engines/table-engines/special/view.md index da3faa42510..9fbaeffa9fa 100644 --- a/docs/ru/engines/table-engines/special/view.md +++ b/docs/ru/engines/table-engines/special/view.md @@ -1,4 +1,5 @@ --- +slug: /ru/engines/table-engines/special/view sidebar_position: 42 sidebar_label: View --- diff --git a/docs/ru/faq/general/columnar-database.md b/docs/ru/faq/general/columnar-database.md index 3d5bd61cc0a..43a1dafdd9b 100644 --- a/docs/ru/faq/general/columnar-database.md +++ b/docs/ru/faq/general/columnar-database.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/general/columnar-database title: Что такое столбцовая база данных? sidebar_position: 101 --- diff --git a/docs/ru/faq/general/dbms-naming.md b/docs/ru/faq/general/dbms-naming.md index dd58d89924e..875d24dea2a 100644 --- a/docs/ru/faq/general/dbms-naming.md +++ b/docs/ru/faq/general/dbms-naming.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/general/dbms-naming title: "Что означает название ClickHouse?" sidebar_position: 10 --- diff --git a/docs/ru/faq/general/how-do-i-contribute-code-to-clickhouse.md b/docs/ru/faq/general/how-do-i-contribute-code-to-clickhouse.md index 1739e03e1bb..95293c5b091 100644 --- a/docs/ru/faq/general/how-do-i-contribute-code-to-clickhouse.md +++ b/docs/ru/faq/general/how-do-i-contribute-code-to-clickhouse.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/general/how-do-i-contribute-code-to-clickhouse title: How do I contribute code to ClickHouse? sidebar_position: 120 --- diff --git a/docs/ru/faq/general/index.md b/docs/ru/faq/general/index.md index 7d34cc643f2..f6c25fe604b 100644 --- a/docs/ru/faq/general/index.md +++ b/docs/ru/faq/general/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/general/ title: Общие вопросы о ClickHouse toc_hidden_folder: true sidebar_position: 1 diff --git a/docs/ru/faq/general/mapreduce.md b/docs/ru/faq/general/mapreduce.md index 92105ee447c..d4a3ed4284c 100644 --- a/docs/ru/faq/general/mapreduce.md +++ b/docs/ru/faq/general/mapreduce.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/general/mapreduce title: Why not use something like MapReduce? sidebar_position: 110 --- diff --git a/docs/ru/faq/general/ne-tormozit.md b/docs/ru/faq/general/ne-tormozit.md index 904780689ec..0f888de839f 100644 --- a/docs/ru/faq/general/ne-tormozit.md +++ b/docs/ru/faq/general/ne-tormozit.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/general/ne-tormozit title: "What does “не тормозит” mean?" sidebar_position: 11 --- diff --git a/docs/ru/faq/general/olap.md b/docs/ru/faq/general/olap.md index 5064df42462..c9021f7c92e 100644 --- a/docs/ru/faq/general/olap.md +++ b/docs/ru/faq/general/olap.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/general/olap title: Что такое OLAP? sidebar_position: 100 --- diff --git a/docs/ru/faq/general/who-is-using-clickhouse.md b/docs/ru/faq/general/who-is-using-clickhouse.md index 1a48b32352c..1aa84906fe8 100644 --- a/docs/ru/faq/general/who-is-using-clickhouse.md +++ b/docs/ru/faq/general/who-is-using-clickhouse.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/general/who-is-using-clickhouse title: Кто пользуется ClickHouse? 
sidebar_position: 9 --- diff --git a/docs/ru/faq/general/why-clickhouse-is-so-fast.md b/docs/ru/faq/general/why-clickhouse-is-so-fast.md index 43b3c818249..2652e93c35e 100644 --- a/docs/ru/faq/general/why-clickhouse-is-so-fast.md +++ b/docs/ru/faq/general/why-clickhouse-is-so-fast.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/general/why-clickhouse-is-so-fast title: Почему ClickHouse так быстро работает? sidebar_position: 8 --- diff --git a/docs/ru/faq/index.md b/docs/ru/faq/index.md index 1d1dc7df819..71ce43ac945 100644 --- a/docs/ru/faq/index.md +++ b/docs/ru/faq/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/ sidebar_label: F.A.Q. sidebar_position: 76 --- diff --git a/docs/ru/faq/integration/file-export.md b/docs/ru/faq/integration/file-export.md index b2565d0109a..bfcc4b80dbd 100644 --- a/docs/ru/faq/integration/file-export.md +++ b/docs/ru/faq/integration/file-export.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/integration/file-export title: Как экспортировать данные из ClickHouse в файл? sidebar_position: 10 --- diff --git a/docs/ru/faq/integration/index.md b/docs/ru/faq/integration/index.md index ee01688af6e..aa0579b9375 100644 --- a/docs/ru/faq/integration/index.md +++ b/docs/ru/faq/integration/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/integration/ title: Интеграция ClickHouse с другими системами toc_hidden_folder: true sidebar_position: 4 diff --git a/docs/ru/faq/integration/json-import.md b/docs/ru/faq/integration/json-import.md index 2f938b32a94..bc65b5a614a 100644 --- a/docs/ru/faq/integration/json-import.md +++ b/docs/ru/faq/integration/json-import.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/integration/json-import title: Как импортировать JSON в ClickHouse? sidebar_position: 11 --- diff --git a/docs/ru/faq/integration/oracle-odbc.md b/docs/ru/faq/integration/oracle-odbc.md index 7e20f2e8edf..f2f9b901770 100644 --- a/docs/ru/faq/integration/oracle-odbc.md +++ b/docs/ru/faq/integration/oracle-odbc.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/integration/oracle-odbc title: Что делать, если у меня проблема с кодировками при использовании Oracle через ODBC? sidebar_position: 20 --- diff --git a/docs/ru/faq/operations/delete-old-data.md b/docs/ru/faq/operations/delete-old-data.md index ab221f8303b..aad33ce0333 100644 --- a/docs/ru/faq/operations/delete-old-data.md +++ b/docs/ru/faq/operations/delete-old-data.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/operations/delete-old-data title: Возможно ли удалить старые записи из таблицы ClickHouse? sidebar_position: 20 --- diff --git a/docs/ru/faq/operations/index.md b/docs/ru/faq/operations/index.md index aab31ec3305..c1135716f39 100644 --- a/docs/ru/faq/operations/index.md +++ b/docs/ru/faq/operations/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/operations/ title: Вопросы о производительности серверов и кластеров ClickHouse toc_hidden_folder: true sidebar_position: 3 diff --git a/docs/ru/faq/operations/multi-region-replication.md b/docs/ru/faq/operations/multi-region-replication.md index 6a4b43d0962..bfe3231c247 100644 --- a/docs/ru/faq/operations/multi-region-replication.md +++ b/docs/ru/faq/operations/multi-region-replication.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/operations/multi-region-replication title: Does ClickHouse support multi-region replication? 
sidebar_position: 30 --- diff --git a/docs/ru/faq/operations/production.md b/docs/ru/faq/operations/production.md index e7de7e3a9e2..6a8b7bc5fb6 100644 --- a/docs/ru/faq/operations/production.md +++ b/docs/ru/faq/operations/production.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/operations/production title: Какую версию ClickHouse использовать? sidebar_position: 10 --- @@ -32,7 +33,7 @@ sidebar_position: 10 Второе направление — **автоматизированное тестирование**. Не думайте, что если какой-то запрос отработал успешно один раз, так будет всегда. Считается приемлемым выполнять некоторые юнит-тесты, используя "заглушки" вместо запросов к СУБД. Но вы должны проводить достаточное количество автотестов, где запросы выполняются в реальном ClickHouse, чтобы убедиться, что все важные задачи отрабатывают должным образом. -В продолжение этой темы, вы можете поделиться вашими автотестами и передать их [в открытую тестовую среду ClickHouse](https://github.com/ClickHouse/ClickHouse/tree/master/tests), которая используется для постоянного развития нашей СУБД. Вам придётся потратить немного времени и сил, чтобы научиться [составлять и выполнять тесты](../../development/tests.md), а также чтобы перенести ваши тесты на эту платформу. Наградой за это станет уверенность в том, что новые стабильные релизы ClickHouse будут корректно работать на ваших задачах. Это гораздо лучше, чем тратить время на то, чтобы вновь отлавливать прежние ошибки в новых версиях, а затем ждать, пока их исправят и включат эти исправления в очередной релиз. Некоторые компании уже включили в корпоративные регламенты необходимость передачи своих тестов в ClickHouse, прежде всего стоит упомянуть [правило Beyonce](https://www.oreilly.com/library/view/software-engineering-at/9781492082781/ch01.html#policies_that_scale_well), действующее в Google. +В продолжение этой темы, вы можете поделиться вашими автотестами и передать их [в открытую тестовую среду ClickHouse](https://github.com/ClickHouse/ClickHouse/tree/master/tests), которая используется для постоянного развития нашей СУБД. Вам придётся потратить немного времени и сил, чтобы научиться [составлять и выполнять тесты](../../development/tests.mdx), а также чтобы перенести ваши тесты на эту платформу. Наградой за это станет уверенность в том, что новые стабильные релизы ClickHouse будут корректно работать на ваших задачах. Это гораздо лучше, чем тратить время на то, чтобы вновь отлавливать прежние ошибки в новых версиях, а затем ждать, пока их исправят и включат эти исправления в очередной релиз. Некоторые компании уже включили в корпоративные регламенты необходимость передачи своих тестов в ClickHouse, прежде всего стоит упомянуть [правило Beyonce](https://www.oreilly.com/library/view/software-engineering-at/9781492082781/ch01.html#policies_that_scale_well), действующее в Google. После того, как вы подготовили тестовую среду и инфраструктуру, выбор версии ClickHouse упрощается: @@ -66,4 +67,4 @@ sidebar_position: 10 Часто компании, которые изначально ориентировались на релизы `lts`, позднее переходят на `stable`, поскольку хотят быстрее получать доступ к новым возможностям. :::danger "Важно" - Мы всегда стремимся поддерживать совместимость релизов, но иногда это правило нарушается, и какие-то отдельные возможности в новых релизах становятся недоступны. Перед обновлением ClickHouse обязательно изучите [журнал изменений](../../whats-new/changelog/index.md), чтобы убедиться, что в нем нет объявлений о нарушении обратной совместимости. 
+ Мы всегда стремимся поддерживать совместимость релизов, но иногда это правило нарушается, и какие-то отдельные возможности в новых релизах становятся недоступны. Перед обновлением ClickHouse обязательно изучите [журнал изменений](../../whats-new/changelog/index.mdx), чтобы убедиться, что в нем нет объявлений о нарушении обратной совместимости. diff --git a/docs/ru/faq/use-cases/index.md b/docs/ru/faq/use-cases/index.md index 981a110212e..bd6edd3ea6c 100644 --- a/docs/ru/faq/use-cases/index.md +++ b/docs/ru/faq/use-cases/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/use-cases/ title: Вопросы о применении ClickHouse toc_hidden_folder: true sidebar_position: 2 diff --git a/docs/ru/faq/use-cases/key-value.md b/docs/ru/faq/use-cases/key-value.md index 70ecbb8f660..64bc86502d0 100644 --- a/docs/ru/faq/use-cases/key-value.md +++ b/docs/ru/faq/use-cases/key-value.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/use-cases/key-value title: Можно ли использовать ClickHouse для хранения данных вида "ключ-значение"? sidebar_position: 101 --- diff --git a/docs/ru/faq/use-cases/time-series.md b/docs/ru/faq/use-cases/time-series.md index d7525214cba..d47a05bd1c7 100644 --- a/docs/ru/faq/use-cases/time-series.md +++ b/docs/ru/faq/use-cases/time-series.md @@ -1,4 +1,5 @@ --- +slug: /ru/faq/use-cases/time-series title: Можно ли использовать ClickHouse как базу данных временных рядов? sidebar_position: 101 --- diff --git a/docs/ru/getting-started/example-datasets/amplab-benchmark.md b/docs/ru/getting-started/example-datasets/amplab-benchmark.md index f12ae2a35fd..9e99d960e63 100644 --- a/docs/ru/getting-started/example-datasets/amplab-benchmark.md +++ b/docs/ru/getting-started/example-datasets/amplab-benchmark.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/amplab-benchmark sidebar_position: 19 sidebar_label: AMPLab Big Data Benchmark --- diff --git a/docs/ru/getting-started/example-datasets/brown-benchmark.md b/docs/ru/getting-started/example-datasets/brown-benchmark.md index 8afda860b72..c830d639095 100644 --- a/docs/ru/getting-started/example-datasets/brown-benchmark.md +++ b/docs/ru/getting-started/example-datasets/brown-benchmark.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/brown-benchmark sidebar_position: 20 sidebar_label: Brown University Benchmark --- diff --git a/docs/ru/getting-started/example-datasets/cell-towers.md b/docs/ru/getting-started/example-datasets/cell-towers.md index 49174994c14..cf1a02ae8f0 100644 --- a/docs/ru/getting-started/example-datasets/cell-towers.md +++ b/docs/ru/getting-started/example-datasets/cell-towers.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/cell-towers sidebar_position: 21 sidebar_label: Вышки сотовой связи --- diff --git a/docs/ru/getting-started/example-datasets/criteo.md b/docs/ru/getting-started/example-datasets/criteo.md index 40214995369..a842e178e12 100644 --- a/docs/ru/getting-started/example-datasets/criteo.md +++ b/docs/ru/getting-started/example-datasets/criteo.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/criteo sidebar_position: 18 sidebar_label: "Терабайт логов кликов от Criteo" --- diff --git a/docs/ru/getting-started/example-datasets/github-events.md b/docs/ru/getting-started/example-datasets/github-events.md deleted file mode 120000 index c9649c0a61f..00000000000 --- a/docs/ru/getting-started/example-datasets/github-events.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting-started/example-datasets/github-events.md \ No newline at end of file diff --git 
a/docs/ru/getting-started/example-datasets/github-events.mdx b/docs/ru/getting-started/example-datasets/github-events.mdx new file mode 100644 index 00000000000..c6e58a9f5a4 --- /dev/null +++ b/docs/ru/getting-started/example-datasets/github-events.mdx @@ -0,0 +1,9 @@ +--- +slug: /ru/getting-started/example-datasets/github-events +sidebar_label: GitHub Events +title: "GitHub Events Dataset" +--- + +import Content from '@site/docs/en/getting-started/example-datasets/github-events.md'; + + diff --git a/docs/ru/getting-started/example-datasets/index.md b/docs/ru/getting-started/example-datasets/index.md index 684a471738d..d975a5910c7 100644 --- a/docs/ru/getting-started/example-datasets/index.md +++ b/docs/ru/getting-started/example-datasets/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/ sidebar_label: "Тестовые массивы данных" sidebar_position: 14 --- diff --git a/docs/ru/getting-started/example-datasets/menus.md b/docs/ru/getting-started/example-datasets/menus.md index 5417d859e8b..97800659e34 100644 --- a/docs/ru/getting-started/example-datasets/menus.md +++ b/docs/ru/getting-started/example-datasets/menus.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/menus sidebar_position: 21 sidebar_label: Меню --- diff --git a/docs/ru/getting-started/example-datasets/metrica.md b/docs/ru/getting-started/example-datasets/metrica.md index 771f84e8a05..efa8956bccb 100644 --- a/docs/ru/getting-started/example-datasets/metrica.md +++ b/docs/ru/getting-started/example-datasets/metrica.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/metrica sidebar_position: 15 sidebar_label: "Анонимизированные данные Яндекс.Метрики" --- diff --git a/docs/ru/getting-started/example-datasets/nyc-taxi.md b/docs/ru/getting-started/example-datasets/nyc-taxi.md index 9d9caa43b5e..e8d146324ac 100644 --- a/docs/ru/getting-started/example-datasets/nyc-taxi.md +++ b/docs/ru/getting-started/example-datasets/nyc-taxi.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/nyc-taxi sidebar_position: 20 sidebar_label: "Данные о такси в Нью-Йорке" --- diff --git a/docs/ru/getting-started/example-datasets/ontime.md b/docs/ru/getting-started/example-datasets/ontime.md index 8dab19c7bae..f5c08fe6c70 100644 --- a/docs/ru/getting-started/example-datasets/ontime.md +++ b/docs/ru/getting-started/example-datasets/ontime.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/ontime sidebar_position: 21 sidebar_label: OnTime --- diff --git a/docs/ru/getting-started/example-datasets/opensky.md b/docs/ru/getting-started/example-datasets/opensky.md index fe15c0991a2..578bf8254fa 100644 --- a/docs/ru/getting-started/example-datasets/opensky.md +++ b/docs/ru/getting-started/example-datasets/opensky.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/opensky sidebar_position: 20 sidebar_label: Набор данных о воздушном движении OpenSky Network 2020 --- diff --git a/docs/ru/getting-started/example-datasets/recipes.md b/docs/ru/getting-started/example-datasets/recipes.md index f3b4c8285d7..285d8ff576c 100644 --- a/docs/ru/getting-started/example-datasets/recipes.md +++ b/docs/ru/getting-started/example-datasets/recipes.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/recipes sidebar_position: 16 sidebar_label: Набор данных кулинарных рецептов --- diff --git a/docs/ru/getting-started/example-datasets/star-schema.md b/docs/ru/getting-started/example-datasets/star-schema.md index e6a8f011cde..10bffe67f06 100644 --- 
a/docs/ru/getting-started/example-datasets/star-schema.md +++ b/docs/ru/getting-started/example-datasets/star-schema.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/star-schema sidebar_position: 16 sidebar_label: Star Schema Benchmark --- diff --git a/docs/ru/getting-started/example-datasets/uk-price-paid.md b/docs/ru/getting-started/example-datasets/uk-price-paid.md index ee010da28ea..df6d97c7207 100644 --- a/docs/ru/getting-started/example-datasets/uk-price-paid.md +++ b/docs/ru/getting-started/example-datasets/uk-price-paid.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/uk-price-paid sidebar_position: 20 sidebar_label: Набор данных о стоимости недвижимости в Великобритании --- diff --git a/docs/ru/getting-started/example-datasets/wikistat.md b/docs/ru/getting-started/example-datasets/wikistat.md index 730ff036fe8..9633e9ceb8a 100644 --- a/docs/ru/getting-started/example-datasets/wikistat.md +++ b/docs/ru/getting-started/example-datasets/wikistat.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/example-datasets/wikistat sidebar_position: 17 sidebar_label: WikiStat --- diff --git a/docs/ru/getting-started/index.md b/docs/ru/getting-started/index.md index e5b4746fe58..c6c4e179755 100644 --- a/docs/ru/getting-started/index.md +++ b/docs/ru/getting-started/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/ sidebar_label: "Начало работы" sidebar_position: 8 --- diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index 9f3eabc73ae..02d8c62669d 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/install sidebar_position: 11 sidebar_label: "Установка" --- @@ -137,8 +138,6 @@ do || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz" done -exit 0 - tar -xzvf "clickhouse-common-static-$LATEST_VERSION-${ARCH}.tgz" \ || tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz" sudo "clickhouse-common-static-$LATEST_VERSION/install/doinst.sh" @@ -149,7 +148,7 @@ sudo "clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh" tar -xzvf "clickhouse-server-$LATEST_VERSION-${ARCH}.tgz" \ || tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz" -sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" +sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" configure sudo /etc/init.d/clickhouse-server start tar -xzvf "clickhouse-client-$LATEST_VERSION-${ARCH}.tgz" \ @@ -215,7 +214,7 @@ sudo ./clickhouse install ### Из исходного кода {#from-sources} -Для компиляции ClickHouse вручную, используйте инструкцию для [Linux](../development/build.md) или [Mac OS X](../development/build-osx.md). +Для компиляции ClickHouse вручную, используйте инструкцию для [Linux](../development/build.mdx) или [Mac OS X](../development/build-osx.md). Можно скомпилировать пакеты и установить их, либо использовать программы без установки пакетов. Также при ручной сборке можно отключить необходимость поддержки набора инструкций SSE 4.2 или собрать под процессоры архитектуры AArch64.
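As a quick sanity check after the tgz installation flow patched above, one can confirm the freshly started server answers queries; a minimal sketch, assuming `clickhouse-client` connects to the local default endpoint:

```sql
-- Run via clickhouse-client after `sudo /etc/init.d/clickhouse-server start`;
-- a successful answer confirms the installed server is up.
SELECT version();
```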
diff --git a/docs/ru/getting-started/playground.md b/docs/ru/getting-started/playground.md index fe1b99bb1ef..c8c987eec9e 100644 --- a/docs/ru/getting-started/playground.md +++ b/docs/ru/getting-started/playground.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/playground sidebar_position: 14 sidebar_label: Playground --- diff --git a/docs/ru/getting-started/tutorial.md b/docs/ru/getting-started/tutorial.md index e7fb9c70e09..b1abc787c5d 100644 --- a/docs/ru/getting-started/tutorial.md +++ b/docs/ru/getting-started/tutorial.md @@ -1,4 +1,5 @@ --- +slug: /ru/getting-started/tutorial sidebar_position: 12 sidebar_label: Tutorial --- diff --git a/docs/ru/guides/apply-catboost-model.md b/docs/ru/guides/apply-catboost-model.md index 6a92a087a28..68d7042df2d 100644 --- a/docs/ru/guides/apply-catboost-model.md +++ b/docs/ru/guides/apply-catboost-model.md @@ -1,4 +1,5 @@ --- +slug: /ru/guides/apply-catboost-model sidebar_position: 41 sidebar_label: "Применение модели CatBoost в ClickHouse" --- diff --git a/docs/ru/guides/index.md b/docs/ru/guides/index.md index 9d807abdfd2..0b5938dfc09 100644 --- a/docs/ru/guides/index.md +++ b/docs/ru/guides/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/guides/ sidebar_position: 38 sidebar_label: "Руководства" --- diff --git a/docs/ru/index.md b/docs/ru/index.md index 584071be592..aded04f9367 100644 --- a/docs/ru/index.md +++ b/docs/ru/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/ sidebar_position: 0 sidebar_label: "Что такое ClickHouse" --- diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index eb43db02698..8a4ec083242 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/cli sidebar_position: 17 sidebar_label: "Клиент командной строки" --- diff --git a/docs/ru/interfaces/cpp.md b/docs/ru/interfaces/cpp.md index d303ca939bf..536ad2e97f1 100644 --- a/docs/ru/interfaces/cpp.md +++ b/docs/ru/interfaces/cpp.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/cpp sidebar_position: 24 sidebar_label: "C++ клиентская библиотека" --- diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 37a6a8688b5..e4665ff0c3e 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/formats sidebar_position: 21 sidebar_label: "Форматы входных и выходных данных" --- diff --git a/docs/ru/interfaces/grpc.md b/docs/ru/interfaces/grpc.md index 19f048c74a1..2402aeae4b0 100644 --- a/docs/ru/interfaces/grpc.md +++ b/docs/ru/interfaces/grpc.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/grpc sidebar_position: 18 sidebar_label: gRPC интерфейс --- diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index 292f06b5544..62e97e3f61d 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/http sidebar_position: 19 sidebar_label: "HTTP-интерфейс" --- diff --git a/docs/ru/interfaces/index.md b/docs/ru/interfaces/index.md index e85b958018b..2935de92b11 100644 --- a/docs/ru/interfaces/index.md +++ b/docs/ru/interfaces/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/ sidebar_label: "Интерфейсы" sidebar_position: 14 --- diff --git a/docs/ru/interfaces/jdbc.md b/docs/ru/interfaces/jdbc.md index 83fd1328627..c5891004c0d 100644 --- a/docs/ru/interfaces/jdbc.md +++ b/docs/ru/interfaces/jdbc.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/jdbc sidebar_position: 22 sidebar_label: "JDBC-драйвер" --- diff --git a/docs/ru/interfaces/mysql.md b/docs/ru/interfaces/mysql.md index 
cbaaf84cfe7..e91df97c9c5 100644 --- a/docs/ru/interfaces/mysql.md +++ b/docs/ru/interfaces/mysql.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/mysql sidebar_position: 20 sidebar_label: "MySQL-интерфейс" --- diff --git a/docs/ru/interfaces/odbc.md b/docs/ru/interfaces/odbc.md index 18a1a18c49c..18d7208de2c 100644 --- a/docs/ru/interfaces/odbc.md +++ b/docs/ru/interfaces/odbc.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/odbc sidebar_position: 23 sidebar_label: "ODBC-драйвер" --- diff --git a/docs/ru/interfaces/tcp.md b/docs/ru/interfaces/tcp.md index d1793728b3d..9179f3e0fe1 100644 --- a/docs/ru/interfaces/tcp.md +++ b/docs/ru/interfaces/tcp.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/tcp sidebar_position: 18 sidebar_label: "Родной интерфейс (TCP)" --- diff --git a/docs/ru/interfaces/third-party/client-libraries.md b/docs/ru/interfaces/third-party/client-libraries.md index ab2c9419b7f..ce9f94d5d74 100644 --- a/docs/ru/interfaces/third-party/client-libraries.md +++ b/docs/ru/interfaces/third-party/client-libraries.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/third-party/client-libraries sidebar_position: 26 sidebar_label: "Клиентские библиотеки от сторонних разработчиков" --- diff --git a/docs/ru/interfaces/third-party/gui.md b/docs/ru/interfaces/third-party/gui.md index 3d68889ee28..038d7883f91 100644 --- a/docs/ru/interfaces/third-party/gui.md +++ b/docs/ru/interfaces/third-party/gui.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/third-party/gui sidebar_position: 28 sidebar_label: "Визуальные интерфейсы от сторонних разработчиков" --- diff --git a/docs/ru/interfaces/third-party/index.md b/docs/ru/interfaces/third-party/index.md index 6de35b3d28c..45f29d781b2 100644 --- a/docs/ru/interfaces/third-party/index.md +++ b/docs/ru/interfaces/third-party/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/third-party/ sidebar_label: "Сторонние интерфейсы" sidebar_position: 24 --- diff --git a/docs/ru/interfaces/third-party/integrations.md b/docs/ru/interfaces/third-party/integrations.md index 95d052f6e63..82ba4bd47d6 100644 --- a/docs/ru/interfaces/third-party/integrations.md +++ b/docs/ru/interfaces/third-party/integrations.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/third-party/integrations sidebar_position: 27 sidebar_label: "Библиотеки для интеграции от сторонних разработчиков" --- diff --git a/docs/ru/interfaces/third-party/proxy.md b/docs/ru/interfaces/third-party/proxy.md index 0d789b74808..1a43128ea13 100644 --- a/docs/ru/interfaces/third-party/proxy.md +++ b/docs/ru/interfaces/third-party/proxy.md @@ -1,4 +1,5 @@ --- +slug: /ru/interfaces/third-party/proxy sidebar_position: 29 sidebar_label: "Прокси-серверы от сторонних разработчиков" --- diff --git a/docs/ru/introduction/distinctive-features.md b/docs/ru/introduction/distinctive-features.md index 5a04d83b328..44d92688399 100644 --- a/docs/ru/introduction/distinctive-features.md +++ b/docs/ru/introduction/distinctive-features.md @@ -1,4 +1,5 @@ --- +slug: /ru/introduction/distinctive-features sidebar_position: 4 sidebar_label: "Отличительные возможности ClickHouse" --- @@ -32,9 +33,9 @@ sidebar_label: "Отличительные возможности ClickHouse" ## Поддержка SQL {#sql-support} -ClickHouse поддерживает [декларативный язык запросов на основе SQL](../sql-reference/index.md) и во [многих случаях](../sql-reference/ansi.md) совпадающий с SQL стандартом. +ClickHouse поддерживает [декларативный язык запросов на основе SQL](../sql-reference/index.md) и во [многих случаях](../sql-reference/ansi.mdx) совпадающий с SQL стандартом. 
-Поддерживаются [GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), подзапросы в секциях [FROM](../sql-reference/statements/select/from.md), [IN](../sql-reference/operators/in.md), [JOIN](../sql-reference/statements/select/join.md), [функции window](../sql-reference/window-functions/index.md), а также скалярные подзапросы. +Поддерживаются [GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), подзапросы в секциях [FROM](../sql-reference/statements/select/from.md), [IN](../sql-reference/operators/in.md), [JOIN](../sql-reference/statements/select/join.md), [функции window](../sql-reference/window-functions/index.mdx), а также скалярные подзапросы. Зависимые подзапросы не поддерживаются, но могут стать доступными в будущем. diff --git a/docs/ru/introduction/history.md b/docs/ru/introduction/history.md index 621ad3b1ee2..9ececa76671 100644 --- a/docs/ru/introduction/history.md +++ b/docs/ru/introduction/history.md @@ -1,4 +1,5 @@ --- +slug: /ru/introduction/history sidebar_position: 7 sidebar_label: "История ClickHouse" --- diff --git a/docs/ru/introduction/performance.md b/docs/ru/introduction/performance.md index ace1c59e943..cbfcf5eb88b 100644 --- a/docs/ru/introduction/performance.md +++ b/docs/ru/introduction/performance.md @@ -1,4 +1,5 @@ --- +slug: /ru/introduction/performance sidebar_position: 6 sidebar_label: "Производительность" --- diff --git a/docs/ru/operations/access-rights.md b/docs/ru/operations/access-rights.md index 29960d1175b..987f7fecc55 100644 --- a/docs/ru/operations/access-rights.md +++ b/docs/ru/operations/access-rights.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/access-rights sidebar_position: 48 sidebar_label: "Управление доступом" --- diff --git a/docs/ru/operations/backup.md b/docs/ru/operations/backup.md index 2e869c129ba..a8d62f4c9dd 100644 --- a/docs/ru/operations/backup.md +++ b/docs/ru/operations/backup.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/backup sidebar_position: 49 sidebar_label: "Резервное копирование данных" --- diff --git a/docs/ru/operations/caches.md b/docs/ru/operations/caches.md index 16c35014a08..bfbecd19167 100644 --- a/docs/ru/operations/caches.md +++ b/docs/ru/operations/caches.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/caches sidebar_position: 65 sidebar_label: Кеши --- diff --git a/docs/ru/operations/clickhouse-keeper.md b/docs/ru/operations/clickhouse-keeper.md index 2b130c2fe16..c356b90e8d2 100644 --- a/docs/ru/operations/clickhouse-keeper.md +++ b/docs/ru/operations/clickhouse-keeper.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/clickhouse-keeper sidebar_position: 66 sidebar_label: ClickHouse Keeper --- diff --git a/docs/ru/operations/configuration-files.md b/docs/ru/operations/configuration-files.md index 54a2eefe9e1..2b824ce91bd 100644 --- a/docs/ru/operations/configuration-files.md +++ b/docs/ru/operations/configuration-files.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/configuration-files sidebar_position: 50 sidebar_label: "Конфигурационные файлы" --- diff --git a/docs/ru/operations/external-authenticators/index.md b/docs/ru/operations/external-authenticators/index.md index 90990a374be..88ec5c028ed 100644 --- a/docs/ru/operations/external-authenticators/index.md +++ b/docs/ru/operations/external-authenticators/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/external-authenticators/ sidebar_position: 48 sidebar_label: "Внешние аутентификаторы пользователей и каталоги" --- diff --git 
a/docs/ru/operations/external-authenticators/kerberos.md b/docs/ru/operations/external-authenticators/kerberos.md index 197bf5a6047..7b0702b2132 100644 --- a/docs/ru/operations/external-authenticators/kerberos.md +++ b/docs/ru/operations/external-authenticators/kerberos.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/external-authenticators/kerberos +--- # Kerberos {#external-authenticators-kerberos} ClickHouse предоставляет возможность аутентификации существующих (и правильно сконфигурированных) пользователей с использованием Kerberos. diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 3a8f2c05ae8..e185cd239c1 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/external-authenticators/ldap +--- # LDAP {#external-authenticators-ldap} Для аутентификации пользователей ClickHouse можно использовать сервер LDAP. Существуют два подхода: diff --git a/docs/ru/operations/external-authenticators/ssl-x509.md b/docs/ru/operations/external-authenticators/ssl-x509.md index 12ae7e4eec3..affdf87b199 100644 --- a/docs/ru/operations/external-authenticators/ssl-x509.md +++ b/docs/ru/operations/external-authenticators/ssl-x509.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/external-authenticators/ssl-x509 +--- # Аутентификация по сертификату SSL X.509 {#ssl-external-authentication} [Опция 'strict'](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) включает обязательную проверку сертификатов входящих соединений в библиотеке `SSL`. В этом случае могут быть установлены только соединения, представившие действительный сертификат. Соединения с недоверенными сертификатами будут отвергнуты. Таким образом, проверка сертификата позволяет однозначно аутентифицировать входящее соединение. Идентификация пользователя осуществляется по полю `Common Name` сертификата. Это позволяет ассоциировать несколько сертификатов с одним и тем же пользователем. Дополнительно, перевыпуск и отзыв сертификата не требуют изменения конфигурации ClickHouse. 
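To make the Common Name mapping described in the ssl-x509 hunk above concrete: a hedged sketch using the SQL route (assumes SQL-driven access control is enabled; the user name and CN value are placeholders):

```sql
-- Connections presenting a valid client certificate whose Common Name
-- matches the CN below authenticate as 'cert_user' without a password.
CREATE USER cert_user IDENTIFIED WITH ssl_certificate CN 'cert_user.example.com';
```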
diff --git a/docs/ru/operations/index.md b/docs/ru/operations/index.md index c351461c4cf..19b1e6b0089 100644 --- a/docs/ru/operations/index.md +++ b/docs/ru/operations/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/ sidebar_position: 41 sidebar_label: "Эксплуатация" --- @@ -10,7 +11,7 @@ sidebar_label: "Эксплуатация" - [Требования](requirements.md) - [Мониторинг](monitoring.md) - [Решение проблем](troubleshooting.md) -- [Советы по эксплуатации](tips.md) +- [Советы по эксплуатации](tips.mdx) - [Процедура обновления](update.md) - [Права доступа](access-rights.md) - [Резервное копирование](backup.md) @@ -18,7 +19,7 @@ sidebar_label: "Эксплуатация" - [Квоты](quotas.md) - [Системные таблицы](system-tables/index.md) - [Конфигурационные параметры сервера](server-configuration-parameters/index.md) -- [Тестирование серверов с помощью ClickHouse](performance-test.md) +- [Тестирование серверов с помощью ClickHouse](performance-test.mdx) - [Настройки](settings/index.md#settings) - [Утилиты](utilities/index.md) diff --git a/docs/ru/operations/monitoring.md b/docs/ru/operations/monitoring.md index cd3f56d07fc..fb92852ae7b 100644 --- a/docs/ru/operations/monitoring.md +++ b/docs/ru/operations/monitoring.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/monitoring sidebar_position: 45 sidebar_label: "Мониторинг" --- diff --git a/docs/ru/operations/named-collections.md b/docs/ru/operations/named-collections.md index 92e49b2cde5..330c132f726 100644 --- a/docs/ru/operations/named-collections.md +++ b/docs/ru/operations/named-collections.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/named-collections sidebar_position: 69 sidebar_label: "Именованные соединения" --- diff --git a/docs/ru/operations/opentelemetry.md b/docs/ru/operations/opentelemetry.md index 16bfda690a9..033ca405d9b 100644 --- a/docs/ru/operations/opentelemetry.md +++ b/docs/ru/operations/opentelemetry.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/opentelemetry sidebar_position: 62 sidebar_label: Поддержка OpenTelemetry --- diff --git a/docs/ru/operations/optimizing-performance/index.md b/docs/ru/operations/optimizing-performance/index.md index d8e0a8336fc..460ea211d4c 100644 --- a/docs/ru/operations/optimizing-performance/index.md +++ b/docs/ru/operations/optimizing-performance/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/optimizing-performance/ sidebar_label: Optimizing Performance sidebar_position: 52 --- \ No newline at end of file diff --git a/docs/ru/operations/optimizing-performance/sampling-query-profiler.md b/docs/ru/operations/optimizing-performance/sampling-query-profiler.md index 0c6e79e036d..c77f6a1f290 100644 --- a/docs/ru/operations/optimizing-performance/sampling-query-profiler.md +++ b/docs/ru/operations/optimizing-performance/sampling-query-profiler.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/optimizing-performance/sampling-query-profiler sidebar_position: 54 sidebar_label: Query Profiling --- diff --git a/docs/ru/operations/performance-test.md b/docs/ru/operations/performance-test.md deleted file mode 120000 index a5b151dc417..00000000000 --- a/docs/ru/operations/performance-test.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/performance-test.md \ No newline at end of file diff --git a/docs/ru/operations/performance-test.mdx b/docs/ru/operations/performance-test.mdx new file mode 100644 index 00000000000..ec564cec3bd --- /dev/null +++ b/docs/ru/operations/performance-test.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/operations/performance-test +sidebar_position: 54 +sidebar_label: Testing Hardware +title: "How to Test Your 
Hardware with ClickHouse" +--- + +import Content from '@site/docs/en/operations/performance-test.md'; + + diff --git a/docs/ru/operations/quotas.md b/docs/ru/operations/quotas.md index dfa131279d5..8470c8c0ef6 100644 --- a/docs/ru/operations/quotas.md +++ b/docs/ru/operations/quotas.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/quotas sidebar_position: 51 sidebar_label: "Квоты" --- diff --git a/docs/ru/operations/requirements.md b/docs/ru/operations/requirements.md index 4fa915bce64..bb6816740d7 100644 --- a/docs/ru/operations/requirements.md +++ b/docs/ru/operations/requirements.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/requirements sidebar_position: 44 sidebar_label: "Требования" --- diff --git a/docs/ru/operations/server-configuration-parameters/index.md b/docs/ru/operations/server-configuration-parameters/index.md index ffb0e16f960..3d8313b2261 100644 --- a/docs/ru/operations/server-configuration-parameters/index.md +++ b/docs/ru/operations/server-configuration-parameters/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/server-configuration-parameters/ sidebar_label: "Конфигурационные параметры сервера" sidebar_position: 54 --- diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index f1d7280892e..832b19f9fe6 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/server-configuration-parameters/settings sidebar_position: 57 sidebar_label: "Конфигурационные параметры сервера" --- @@ -411,6 +412,8 @@ ClickHouse проверяет условия для `min_part_size` и `min_part ## interserver_listen_host {#interserver-listen-host} Ограничение по хостам, для обмена между серверами ClickHouse. +Если используется Keeper, то такое же ограничение будет применяться к обмену данными +между различными экземплярами Keeper. Значение по умолчанию совпадает со значением параметра listen_host Примеры: diff --git a/docs/ru/operations/settings/constraints-on-settings.md b/docs/ru/operations/settings/constraints-on-settings.md index 42c00f2a396..21c5a87806a 100644 --- a/docs/ru/operations/settings/constraints-on-settings.md +++ b/docs/ru/operations/settings/constraints-on-settings.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/settings/constraints-on-settings sidebar_position: 62 sidebar_label: "Ограничения на изменение настроек" --- diff --git a/docs/ru/operations/settings/merge-tree-settings.md b/docs/ru/operations/settings/merge-tree-settings.md index e30539498b3..046cb15a066 100644 --- a/docs/ru/operations/settings/merge-tree-settings.md +++ b/docs/ru/operations/settings/merge-tree-settings.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/settings/merge-tree-settings +--- # Настройки MergeTree таблиц {#merge-tree-settings} Значения настроек всех MergeTree таблиц собраны в таблице `system.merge_tree_settings`. Их можно переопределить в разделе `merge_tree` файла `config.xml` или задать в секции `SETTINGS` каждой таблицы. 
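A short sketch of the two override routes named in the merge-tree-settings hunk above: inspecting `system.merge_tree_settings` and setting a value per table via the `SETTINGS` clause (`index_granularity` is used here as a representative setting; the table itself is illustrative):

```sql
-- Inspect current MergeTree defaults and whether they were overridden.
SELECT name, value, changed
FROM system.merge_tree_settings
LIMIT 5;

-- Override a single setting for one table at creation time.
CREATE TABLE t_custom_granularity
(
    x UInt64
)
ENGINE = MergeTree
ORDER BY x
SETTINGS index_granularity = 4096;
```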
diff --git a/docs/ru/operations/settings/permissions-for-queries.md b/docs/ru/operations/settings/permissions-for-queries.md index 5ad9f1e606a..aa9d8d56d35 100644 --- a/docs/ru/operations/settings/permissions-for-queries.md +++ b/docs/ru/operations/settings/permissions-for-queries.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/settings/permissions-for-queries sidebar_position: 58 sidebar_label: "Разрешения для запросов" --- diff --git a/docs/ru/operations/settings/query-complexity.md b/docs/ru/operations/settings/query-complexity.md index 0d73afc3448..b9d8aef9032 100644 --- a/docs/ru/operations/settings/query-complexity.md +++ b/docs/ru/operations/settings/query-complexity.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/settings/query-complexity sidebar_position: 59 sidebar_label: "Ограничения на сложность запроса" --- diff --git a/docs/ru/operations/settings/settings-profiles.md b/docs/ru/operations/settings/settings-profiles.md index 673503e6572..8a143c18021 100644 --- a/docs/ru/operations/settings/settings-profiles.md +++ b/docs/ru/operations/settings/settings-profiles.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/settings/settings-profiles sidebar_position: 61 sidebar_label: "Профили настроек" --- diff --git a/docs/ru/operations/settings/settings-users.md b/docs/ru/operations/settings/settings-users.md index bdaea22fa8d..3a7600b1457 100644 --- a/docs/ru/operations/settings/settings-users.md +++ b/docs/ru/operations/settings/settings-users.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/settings/settings-users sidebar_position: 63 sidebar_label: "Настройки пользователей" --- diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index abaa05639c6..5ddc684ce2a 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -999,7 +999,7 @@ log_query_threads=1 Задаёт значение поля `log_comment` таблицы [system.query_log](../system-tables/query_log.md) и текст комментария в логе сервера. -Может быть использована для улучшения читабельности логов сервера. Кроме того, помогает быстро выделить связанные с тестом запросы из `system.query_log` после запуска [clickhouse-test](../../development/tests.md). +Может быть использована для улучшения читабельности логов сервера. Кроме того, помогает быстро выделить связанные с тестом запросы из `system.query_log` после запуска [clickhouse-test](../../development/tests.mdx). Возможные значения: diff --git a/docs/ru/operations/storing-data.md b/docs/ru/operations/storing-data.md index e5d7586c120..2f5c9c95ea4 100644 --- a/docs/ru/operations/storing-data.md +++ b/docs/ru/operations/storing-data.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/storing-data sidebar_position: 68 sidebar_label: "Хранение данных на внешних дисках" --- diff --git a/docs/ru/operations/system-tables/asynchronous_metric_log.md b/docs/ru/operations/system-tables/asynchronous_metric_log.md index 8bb371de230..6355131a509 100644 --- a/docs/ru/operations/system-tables/asynchronous_metric_log.md +++ b/docs/ru/operations/system-tables/asynchronous_metric_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/asynchronous_metric_log +--- ## system.asynchronous_metric_log {#system-tables-async-log} Содержит исторические значения метрик из таблицы `system.asynchronous_metrics`, которые сохраняются раз в минуту. По умолчанию включена. 
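Returning to the `log_comment` setting documented in the settings.md hunk above, a minimal usage sketch (the comment text is arbitrary; `SYSTEM FLUSH LOGS` simply makes the log entry visible immediately):

```sql
-- Tag subsequent queries so they are easy to pick out of system.query_log.
SET log_comment = 'test-run-42';
SELECT 1;

SYSTEM FLUSH LOGS;

SELECT query, log_comment
FROM system.query_log
WHERE log_comment = 'test-run-42'
LIMIT 1;
```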
diff --git a/docs/ru/operations/system-tables/asynchronous_metrics.md b/docs/ru/operations/system-tables/asynchronous_metrics.md index faefdf0eee5..05f69a5b1ba 100644 --- a/docs/ru/operations/system-tables/asynchronous_metrics.md +++ b/docs/ru/operations/system-tables/asynchronous_metrics.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/asynchronous_metrics +--- # system.asynchronous_metrics {#system_tables-asynchronous_metrics} Содержит метрики, которые периодически вычисляются в фоновом режиме. Например, объём используемой оперативной памяти. diff --git a/docs/ru/operations/system-tables/clusters.md b/docs/ru/operations/system-tables/clusters.md index 832f5090ce5..2ec13c5064d 100644 --- a/docs/ru/operations/system-tables/clusters.md +++ b/docs/ru/operations/system-tables/clusters.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/clusters +--- # system.clusters {#system-clusters} Содержит информацию о доступных в конфигурационном файле кластерах и серверах, которые в них входят. diff --git a/docs/ru/operations/system-tables/columns.md b/docs/ru/operations/system-tables/columns.md index b5108386ce8..818da3d6ac6 100644 --- a/docs/ru/operations/system-tables/columns.md +++ b/docs/ru/operations/system-tables/columns.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/columns +--- # system.columns {#system-columns} Содержит информацию о столбцах всех таблиц. diff --git a/docs/ru/operations/system-tables/contributors.md b/docs/ru/operations/system-tables/contributors.md index 6e11219e044..57cf9d3d8fc 100644 --- a/docs/ru/operations/system-tables/contributors.md +++ b/docs/ru/operations/system-tables/contributors.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/contributors +--- # system.contributors {#system-contributors} Содержит информацию о контрибьютерах. Контрибьютеры расположены в таблице в случайном порядке. Порядок определяется заново при каждом запросе. diff --git a/docs/ru/operations/system-tables/crash-log.md b/docs/ru/operations/system-tables/crash-log.md index 7aaac343585..4ca8be5a199 100644 --- a/docs/ru/operations/system-tables/crash-log.md +++ b/docs/ru/operations/system-tables/crash-log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/crash-log +--- # system.crash_log {#system-tables_crash_log} Содержит информацию о трассировках стека для фатальных ошибок. Таблица не содержится в базе данных по умолчанию, а создается только при возникновении фатальных ошибок. diff --git a/docs/ru/operations/system-tables/current-roles.md b/docs/ru/operations/system-tables/current-roles.md index ee9cbb08b3d..ea59ce29716 100644 --- a/docs/ru/operations/system-tables/current-roles.md +++ b/docs/ru/operations/system-tables/current-roles.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/current-roles +--- # system.current_roles {#system_tables-current_roles} Содержит активные роли текущего пользователя. `SET ROLE` изменяет содержимое этой таблицы. diff --git a/docs/ru/operations/system-tables/data_skipping_indices.md b/docs/ru/operations/system-tables/data_skipping_indices.md index d57d62cf08b..ae47c43445f 100644 --- a/docs/ru/operations/system-tables/data_skipping_indices.md +++ b/docs/ru/operations/system-tables/data_skipping_indices.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/data_skipping_indices +--- # system.data_skipping_indices {#system-data-skipping-indices} Содержит информацию о существующих индексах пропуска данных во всех таблицах. 
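A hedged companion query for the `system.data_skipping_indices` entry above (column names assumed from the stock schema of this table):

```sql
-- List the data-skipping indexes defined on tables in the current database.
SELECT table, name, type, expr, granularity
FROM system.data_skipping_indices
WHERE database = currentDatabase();
```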
diff --git a/docs/ru/operations/system-tables/data_type_families.md b/docs/ru/operations/system-tables/data_type_families.md index ba4e5e64ec3..5a096f294cf 100644 --- a/docs/ru/operations/system-tables/data_type_families.md +++ b/docs/ru/operations/system-tables/data_type_families.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/data_type_families +--- # system.data_type_families {#system_tables-data_type_families} Содержит информацию о поддерживаемых [типах данных](../../sql-reference/data-types/index.md). diff --git a/docs/ru/operations/system-tables/databases.md b/docs/ru/operations/system-tables/databases.md index 026f49c0d5d..0dbff7cb7aa 100644 --- a/docs/ru/operations/system-tables/databases.md +++ b/docs/ru/operations/system-tables/databases.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/databases +--- # system.databases {#system-databases} Таблица содержит один столбец name типа String - имя базы данных. diff --git a/docs/ru/operations/system-tables/detached_parts.md b/docs/ru/operations/system-tables/detached_parts.md index 7abed6500aa..ded89c1dcc2 100644 --- a/docs/ru/operations/system-tables/detached_parts.md +++ b/docs/ru/operations/system-tables/detached_parts.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/detached_parts +--- # system.detached_parts {#system_tables-detached_parts} Содержит информацию об отсоединённых кусках таблиц семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). Столбец `reason` содержит причину, по которой кусок был отсоединён. Для кусов, отсоединённых пользователем, `reason` содержит пустую строку. diff --git a/docs/ru/operations/system-tables/dictionaries.md b/docs/ru/operations/system-tables/dictionaries.md index ae4e5055ce8..75e678360ef 100644 --- a/docs/ru/operations/system-tables/dictionaries.md +++ b/docs/ru/operations/system-tables/dictionaries.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/dictionaries +--- # system.dictionaries {#system_tables-dictionaries} Содержит информацию о [внешних словарях](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). diff --git a/docs/ru/operations/system-tables/disks.md b/docs/ru/operations/system-tables/disks.md index 186dfbd7819..fc4c370cc1a 100644 --- a/docs/ru/operations/system-tables/disks.md +++ b/docs/ru/operations/system-tables/disks.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/disks +--- # system.disks {#system_tables-disks} Cодержит информацию о дисках, заданных в [конфигурации сервера](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). diff --git a/docs/ru/operations/system-tables/distributed_ddl_queue.md b/docs/ru/operations/system-tables/distributed_ddl_queue.md index b384243834f..6a20b5d6a19 100644 --- a/docs/ru/operations/system-tables/distributed_ddl_queue.md +++ b/docs/ru/operations/system-tables/distributed_ddl_queue.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/distributed_ddl_queue +--- # system.distributed_ddl_queue {#system_tables-distributed_ddl_queue} Содержит информацию о [распределенных ddl запросах (секция ON CLUSTER)](../../sql-reference/distributed-ddl.md), которые были выполнены на кластере. @@ -15,7 +18,7 @@ - `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время начала запроса. - `query_finish_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время окончания запроса. 
- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md)) — продолжительность выполнения запроса (в миллисекундах). -- `exception_code` ([Enum8](../../sql-reference/data-types/enum.md)) — код исключения из [ZooKeeper](../../operations/tips.md#zookeeper). +- `exception_code` ([Enum8](../../sql-reference/data-types/enum.md)) — код исключения из [ZooKeeper](../../operations/tips.mdx#zookeeper). **Пример** diff --git a/docs/ru/operations/system-tables/distribution_queue.md b/docs/ru/operations/system-tables/distribution_queue.md index 08f99d77343..d58e9481b00 100644 --- a/docs/ru/operations/system-tables/distribution_queue.md +++ b/docs/ru/operations/system-tables/distribution_queue.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/distribution_queue +--- # system.distribution_queue {#system_tables-distribution_queue} Содержит информацию о локальных файлах, которые находятся в очереди для отправки на шарды. Эти локальные файлы содержат новые куски, которые создаются путем вставки новых данных в Distributed таблицу в асинхронном режиме. diff --git a/docs/ru/operations/system-tables/enabled-roles.md b/docs/ru/operations/system-tables/enabled-roles.md index 2208f96e812..a28c0f9aae2 100644 --- a/docs/ru/operations/system-tables/enabled-roles.md +++ b/docs/ru/operations/system-tables/enabled-roles.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/enabled-roles +--- # system.enabled_roles {#system_tables-enabled_roles} Содержит все активные роли на данный момент, включая текущую роль текущего пользователя и роли, назначенные для текущей роли. diff --git a/docs/ru/operations/system-tables/errors.md b/docs/ru/operations/system-tables/errors.md index 3a824c8c834..8eccb7fd1e0 100644 --- a/docs/ru/operations/system-tables/errors.md +++ b/docs/ru/operations/system-tables/errors.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/errors +--- # system.errors {#system_tables-errors} Содержит коды ошибок с указанием количества срабатываний. diff --git a/docs/ru/operations/system-tables/events.md b/docs/ru/operations/system-tables/events.md index c05be74eea6..4e295419da3 100644 --- a/docs/ru/operations/system-tables/events.md +++ b/docs/ru/operations/system-tables/events.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/events +--- # system.events {#system_tables-events} Содержит информацию о количестве событий, произошедших в системе. Например, в таблице можно найти, сколько запросов `SELECT` обработано с момента запуска сервера ClickHouse. diff --git a/docs/ru/operations/system-tables/functions.md b/docs/ru/operations/system-tables/functions.md index de752e2018c..a588f1ff495 100644 --- a/docs/ru/operations/system-tables/functions.md +++ b/docs/ru/operations/system-tables/functions.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/functions +--- # system.functions {#system-functions} Содержит информацию об обычных и агрегатных функциях. diff --git a/docs/ru/operations/system-tables/grants.md b/docs/ru/operations/system-tables/grants.md index 76a014f62dd..d48c89883fc 100644 --- a/docs/ru/operations/system-tables/grants.md +++ b/docs/ru/operations/system-tables/grants.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/grants +--- # system.grants {#system_tables-grants} Привилегии пользовательских аккаунтов ClickHouse. 
diff --git a/docs/ru/operations/system-tables/graphite_retentions.md b/docs/ru/operations/system-tables/graphite_retentions.md index 1098a29aac6..d0d53f87071 100644 --- a/docs/ru/operations/system-tables/graphite_retentions.md +++ b/docs/ru/operations/system-tables/graphite_retentions.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/graphite_retentions +--- # system.graphite_retentions {#system-graphite-retentions} Содержит информацию о том, какие параметры [graphite_rollup](../server-configuration-parameters/settings.md#server_configuration_parameters-graphite) используются в таблицах с движками [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md). diff --git a/docs/ru/operations/system-tables/index.md b/docs/ru/operations/system-tables/index.md index 85a28e49846..4da0e716f09 100644 --- a/docs/ru/operations/system-tables/index.md +++ b/docs/ru/operations/system-tables/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/system-tables/ sidebar_position: 52 sidebar_label: "Системные таблицы" --- diff --git a/docs/ru/operations/system-tables/information_schema.md b/docs/ru/operations/system-tables/information_schema.md index b61418931bd..6a9b8134dad 100644 --- a/docs/ru/operations/system-tables/information_schema.md +++ b/docs/ru/operations/system-tables/information_schema.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/information_schema +--- # INFORMATION_SCHEMA {#information-schema} `INFORMATION_SCHEMA` (`information_schema`) — это системная база данных, содержащая представления. Используя эти представления, вы можете получить информацию о метаданных объектов базы данных. Эти представления считывают данные из столбцов системных таблиц [system.columns](../../operations/system-tables/columns.md), [system.databases](../../operations/system-tables/databases.md) и [system.tables](../../operations/system-tables/tables.md). diff --git a/docs/ru/operations/system-tables/licenses.md b/docs/ru/operations/system-tables/licenses.md index b22dc73b666..63b8d7a436f 100644 --- a/docs/ru/operations/system-tables/licenses.md +++ b/docs/ru/operations/system-tables/licenses.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/licenses +--- # system.licenses {#system-tables_system.licenses} Содержит информацию о лицензиях сторонних библиотек, которые находятся в директории [contrib](https://github.com/ClickHouse/ClickHouse/tree/master/contrib) исходных кодов ClickHouse. 
diff --git a/docs/ru/operations/system-tables/merge_tree_settings.md b/docs/ru/operations/system-tables/merge_tree_settings.md deleted file mode 120000 index dbff2462867..00000000000 --- a/docs/ru/operations/system-tables/merge_tree_settings.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/system-tables/merge_tree_settings.md \ No newline at end of file diff --git a/docs/ru/operations/system-tables/merge_tree_settings.mdx b/docs/ru/operations/system-tables/merge_tree_settings.mdx new file mode 100644 index 00000000000..c908ff29543 --- /dev/null +++ b/docs/ru/operations/system-tables/merge_tree_settings.mdx @@ -0,0 +1,9 @@ +--- +slug: /ru/operations/system-tables/merge_tree_settings +sidebar_label: MergeTree settings +title: "MergeTree settings" +--- + +import Content from '@site/docs/en/operations/system-tables/merge_tree_settings.md'; + +<Content /> diff --git a/docs/ru/operations/system-tables/merges.md b/docs/ru/operations/system-tables/merges.md index f48f0d1ac27..3ed4fe979d6 100644 --- a/docs/ru/operations/system-tables/merges.md +++ b/docs/ru/operations/system-tables/merges.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/merges +--- # system.merges {#system-merges} Содержит информацию о производящихся прямо сейчас слияниях и мутациях кусков для таблиц семейства MergeTree. diff --git a/docs/ru/operations/system-tables/metric_log.md b/docs/ru/operations/system-tables/metric_log.md index 5160b32927b..155f4f7afad 100644 --- a/docs/ru/operations/system-tables/metric_log.md +++ b/docs/ru/operations/system-tables/metric_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/metric_log +--- # system.metric_log {#system_tables-metric_log} Содержит историю значений метрик из таблиц `system.metrics` и `system.events`, периодически сбрасываемую на диск. diff --git a/docs/ru/operations/system-tables/metrics.md b/docs/ru/operations/system-tables/metrics.md index e0ff6ba8278..b54c2be7f8c 100644 --- a/docs/ru/operations/system-tables/metrics.md +++ b/docs/ru/operations/system-tables/metrics.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/metrics +--- # system.metrics {#system_tables-metrics} Содержит метрики, которые могут быть рассчитаны мгновенно или имеют текущее значение. Например, число одновременно обрабатываемых запросов или текущее значение задержки реплики. Эта таблица всегда актуальна. diff --git a/docs/ru/operations/system-tables/mutations.md b/docs/ru/operations/system-tables/mutations.md index f3810e29698..20e4ebfdaf1 100644 --- a/docs/ru/operations/system-tables/mutations.md +++ b/docs/ru/operations/system-tables/mutations.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/mutations +--- # system.mutations {#system_tables-mutations} Таблица содержит информацию о ходе выполнения [мутаций](../../sql-reference/statements/alter/index.md#mutations) таблиц семейства MergeTree. Каждой команде мутации соответствует одна строка таблицы. diff --git a/docs/ru/operations/system-tables/numbers.md b/docs/ru/operations/system-tables/numbers.md index 0be4a4ce05d..6d3ed81973e 100644 --- a/docs/ru/operations/system-tables/numbers.md +++ b/docs/ru/operations/system-tables/numbers.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/numbers +--- # system.numbers {#system-numbers} Таблица содержит один столбец с именем number типа UInt64, содержащим почти все натуральные числа, начиная с нуля.
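Pages that previously existed only as symlinks into the English tree (git file mode `120000` marks a symlink) are deleted and replaced by small `.mdx` wrappers that import the English page and re-render it under the Russian URL. The `merge_tree_settings` hunk above shows the full shape; as a generic sketch (the `slug`, labels, and import path vary per page, and the final `<Content />` line is what actually renders the imported document):

```mdx
---
slug: /ru/operations/system-tables/merge_tree_settings
sidebar_label: MergeTree settings
title: "MergeTree settings"
---

import Content from '@site/docs/en/operations/system-tables/merge_tree_settings.md';

<Content />
```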
diff --git a/docs/ru/operations/system-tables/numbers_mt.md b/docs/ru/operations/system-tables/numbers_mt.md index d66c4515ddb..4853ef211d4 100644 --- a/docs/ru/operations/system-tables/numbers_mt.md +++ b/docs/ru/operations/system-tables/numbers_mt.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/numbers_mt +--- # system.numbers_mt {#system-numbers-mt} То же самое, что и [system.numbers](../../operations/system-tables/numbers.md), но чтение распараллеливается. Числа могут возвращаться в произвольном порядке. diff --git a/docs/ru/operations/system-tables/one.md b/docs/ru/operations/system-tables/one.md index 5cb297f06d4..145985830ae 100644 --- a/docs/ru/operations/system-tables/one.md +++ b/docs/ru/operations/system-tables/one.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/one +--- # system.one {#system-one} Таблица содержит одну строку с одним столбцом `dummy` типа UInt8, содержащим значение 0. diff --git a/docs/ru/operations/system-tables/opentelemetry_span_log.md b/docs/ru/operations/system-tables/opentelemetry_span_log.md index 5c96f22b6c2..8c272975d65 100644 --- a/docs/ru/operations/system-tables/opentelemetry_span_log.md +++ b/docs/ru/operations/system-tables/opentelemetry_span_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/opentelemetry_span_log +--- # system.opentelemetry_span_log {#system_tables-opentelemetry_span_log} Содержит информацию о [trace spans](https://opentracing.io/docs/overview/spans/) для выполненных запросов. diff --git a/docs/ru/operations/system-tables/part_log.md b/docs/ru/operations/system-tables/part_log.md index 73ded8c7355..9d05b011285 100644 --- a/docs/ru/operations/system-tables/part_log.md +++ b/docs/ru/operations/system-tables/part_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/part_log +--- # system.part_log {#system_tables-part-log} Системная таблица `system.part_log` создается только в том случае, если задана серверная настройка [part_log](../server-configuration-parameters/settings.md#server_configuration_parameters-part-log). diff --git a/docs/ru/operations/system-tables/parts.md b/docs/ru/operations/system-tables/parts.md index ec806597604..b67c38b5e81 100644 --- a/docs/ru/operations/system-tables/parts.md +++ b/docs/ru/operations/system-tables/parts.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/parts +--- # system.parts {#system_tables-parts} Содержит информацию о кусках данных таблиц семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). diff --git a/docs/ru/operations/system-tables/parts_columns.md b/docs/ru/operations/system-tables/parts_columns.md index 04220ea480f..7190dc5306c 100644 --- a/docs/ru/operations/system-tables/parts_columns.md +++ b/docs/ru/operations/system-tables/parts_columns.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/parts_columns +--- # system.parts_columns {#system_tables-parts_columns} Содержит информацию о кусках данных и столбцах таблиц семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). diff --git a/docs/ru/operations/system-tables/processes.md b/docs/ru/operations/system-tables/processes.md index 682b174c483..c2c2cf5b4f2 100644 --- a/docs/ru/operations/system-tables/processes.md +++ b/docs/ru/operations/system-tables/processes.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/processes +--- # system.processes {#system_tables-processes} Используется для реализации запроса `SHOW PROCESSLIST`. 
diff --git a/docs/ru/operations/system-tables/query_log.md b/docs/ru/operations/system-tables/query_log.md index c9f9fc25b8f..a55528bd829 100644 --- a/docs/ru/operations/system-tables/query_log.md +++ b/docs/ru/operations/system-tables/query_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/query_log +--- # system.query_log {#system_tables-query_log} Содержит информацию о выполняемых запросах, например, время начала обработки, продолжительность обработки, сообщения об ошибках. diff --git a/docs/ru/operations/system-tables/query_thread_log.md b/docs/ru/operations/system-tables/query_thread_log.md index c23d2828520..c9aabb02cad 100644 --- a/docs/ru/operations/system-tables/query_thread_log.md +++ b/docs/ru/operations/system-tables/query_thread_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/query_thread_log +--- # system.query_thread_log {#system_tables-query_thread_log} Содержит информацию о потоках, которые выполняют запросы, например, имя потока, время его запуска, продолжительность обработки запроса. diff --git a/docs/ru/operations/system-tables/query_views_log.md b/docs/ru/operations/system-tables/query_views_log.md index 8b1a8d387a6..337ae59433b 100644 --- a/docs/ru/operations/system-tables/query_views_log.md +++ b/docs/ru/operations/system-tables/query_views_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/query_views_log +--- # system.query_views_log {#system_tables-query_views_log} Содержит информацию о зависимых представлениях, выполняемых при выполнении запроса, например, тип представления или время выполнения. diff --git a/docs/ru/operations/system-tables/quota_limits.md b/docs/ru/operations/system-tables/quota_limits.md index 21505b7d2c5..20f66e1447e 100644 --- a/docs/ru/operations/system-tables/quota_limits.md +++ b/docs/ru/operations/system-tables/quota_limits.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/quota_limits +--- # system.quota_limits {#system_tables-quota_limits} Содержит информацию о максимумах для всех интервалов всех квот. Одной квоте могут соответствовать любое количество строк или ноль. @@ -17,4 +20,4 @@ - `max_result_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — максимальный объем оперативной памяти в байтах, используемый для хранения результата запроса. - `max_read_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — максимальное количество строк, считываемых из всех таблиц и табличных функций, участвующих в запросе. - `max_read_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — максимальное количество байтов, считываемых из всех таблиц и табличных функций, участвующих в запросе. -- `max_execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — максимальное время выполнения запроса, в секундах. \ No newline at end of file +- `max_execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — максимальное время выполнения запроса, в секундах. 
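A handful of hunks, like the `quota_limits` one just above, change nothing but the trailing newline: git flags the old last line with `\ No newline at end of file`, and the patch re-adds the identical line properly terminated. The same two-line pattern repeats in the `quota_usage` and `quotas_usage` hunks below:

```diff
-- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement)
\ No newline at end of file
+- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement)
```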
diff --git a/docs/ru/operations/system-tables/quota_usage.md b/docs/ru/operations/system-tables/quota_usage.md index 19e9397ebaa..96f6debd24e 100644 --- a/docs/ru/operations/system-tables/quota_usage.md +++ b/docs/ru/operations/system-tables/quota_usage.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/quota_usage +--- # system.quota_usage {#system_tables-quota_usage} Использование квоты текущим пользователем: сколько используется и сколько осталось. @@ -28,4 +31,4 @@ ## Смотрите также {#see-also} -- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) \ No newline at end of file +- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) diff --git a/docs/ru/operations/system-tables/quotas.md b/docs/ru/operations/system-tables/quotas.md index 6c8b5a3eebf..df105fd10b5 100644 --- a/docs/ru/operations/system-tables/quotas.md +++ b/docs/ru/operations/system-tables/quotas.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/quotas +--- # system.quotas {#system_tables-quotas} Содержит информацию о [квотах](quotas.md). diff --git a/docs/ru/operations/system-tables/quotas_usage.md b/docs/ru/operations/system-tables/quotas_usage.md index fe066e38add..27e7cdf8abe 100644 --- a/docs/ru/operations/system-tables/quotas_usage.md +++ b/docs/ru/operations/system-tables/quotas_usage.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/quotas_usage +--- # system.quotas_usage {#system_tables-quotas_usage} Использование квот всеми пользователями. @@ -31,4 +34,4 @@ ## Смотрите также {#see-also} -- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) \ No newline at end of file +- [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) diff --git a/docs/ru/operations/system-tables/replicas.md b/docs/ru/operations/system-tables/replicas.md index 8f86f94dedd..57e958f2b7a 100644 --- a/docs/ru/operations/system-tables/replicas.md +++ b/docs/ru/operations/system-tables/replicas.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/replicas +--- # system.replicas {#system_tables-replicas} Содержит информацию и статус для реплицируемых таблиц, расположенных на локальном сервере. diff --git a/docs/ru/operations/system-tables/replicated_fetches.md b/docs/ru/operations/system-tables/replicated_fetches.md index 31d5a5cfe08..aff8f8cbe00 100644 --- a/docs/ru/operations/system-tables/replicated_fetches.md +++ b/docs/ru/operations/system-tables/replicated_fetches.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/replicated_fetches +--- # system.replicated_fetches {#system_tables-replicated_fetches} Содержит информацию о выполняемых в данный момент фоновых операциях скачивания кусков данных с других реплик. diff --git a/docs/ru/operations/system-tables/replication_queue.md b/docs/ru/operations/system-tables/replication_queue.md index 661962e83c4..25de174e98f 100644 --- a/docs/ru/operations/system-tables/replication_queue.md +++ b/docs/ru/operations/system-tables/replication_queue.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/replication_queue +--- # system.replication_queue {#system_tables-replication_queue} Содержит информацию о задачах из очередей репликации, хранящихся в ZooKeeper, для таблиц семейства `ReplicatedMergeTree`. 
diff --git a/docs/ru/operations/system-tables/role-grants.md b/docs/ru/operations/system-tables/role-grants.md index e392349af48..a9d114fc508 100644 --- a/docs/ru/operations/system-tables/role-grants.md +++ b/docs/ru/operations/system-tables/role-grants.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/role-grants +--- # system.role_grants {#system_tables-role_grants} Содержит [гранты](../../sql-reference/statements/grant.md) ролей для пользователей и ролей. Чтобы добавить записи в эту таблицу, используйте команду `GRANT role TO user`. diff --git a/docs/ru/operations/system-tables/roles.md b/docs/ru/operations/system-tables/roles.md index c2b94214012..97a90059e0c 100644 --- a/docs/ru/operations/system-tables/roles.md +++ b/docs/ru/operations/system-tables/roles.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/roles +--- # system.roles {#system_tables-roles} Содержит сведения о [ролях](../../operations/access-rights.md#role-management). diff --git a/docs/ru/operations/system-tables/row_policies.md b/docs/ru/operations/system-tables/row_policies.md index f1e84a201cb..bfff008f2c1 100644 --- a/docs/ru/operations/system-tables/row_policies.md +++ b/docs/ru/operations/system-tables/row_policies.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/row_policies +--- # system.row_policies {#system_tables-row_policies} Содержит фильтры безопасности уровня строк (политики строк) для каждой таблицы, а также список ролей и/или пользователей, к которым применяются эти политики. diff --git a/docs/ru/operations/system-tables/session_log.md b/docs/ru/operations/system-tables/session_log.md index bb359d15fc5..1f313e7815a 100644 --- a/docs/ru/operations/system-tables/session_log.md +++ b/docs/ru/operations/system-tables/session_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/session_log +--- # system.session_log {#system_tables-session_log} Содержит информацию о всех успешных и неудачных событиях входа и выхода из системы. diff --git a/docs/ru/operations/system-tables/settings.md b/docs/ru/operations/system-tables/settings.md index c9d63d336b6..e7a4d91788c 100644 --- a/docs/ru/operations/system-tables/settings.md +++ b/docs/ru/operations/system-tables/settings.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/settings +--- # system.settings {#system-tables-system-settings} Содержит информацию о сессионных настройках для текущего пользователя. diff --git a/docs/ru/operations/system-tables/settings_profile_elements.md b/docs/ru/operations/system-tables/settings_profile_elements.md index 8a1461c6bb0..8bde8453c17 100644 --- a/docs/ru/operations/system-tables/settings_profile_elements.md +++ b/docs/ru/operations/system-tables/settings_profile_elements.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/settings_profile_elements +--- # system.settings_profile_elements {#system_tables-settings_profile_elements} Описывает содержимое профиля настроек: diff --git a/docs/ru/operations/system-tables/settings_profiles.md b/docs/ru/operations/system-tables/settings_profiles.md index 8e0a8fde702..55e3b408c10 100644 --- a/docs/ru/operations/system-tables/settings_profiles.md +++ b/docs/ru/operations/system-tables/settings_profiles.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/settings_profiles +--- # system.settings_profiles {#system_tables-settings_profiles} Содержит свойства сконфигурированных профилей настроек. 
diff --git a/docs/ru/operations/system-tables/stack_trace.md b/docs/ru/operations/system-tables/stack_trace.md index 338c14534cf..817f66d1af0 100644 --- a/docs/ru/operations/system-tables/stack_trace.md +++ b/docs/ru/operations/system-tables/stack_trace.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/stack_trace +--- # system.stack_trace {#system-tables_stack_trace} Содержит трассировки стека всех серверных потоков. Позволяет разработчикам анализировать состояние сервера. diff --git a/docs/ru/operations/system-tables/storage_policies.md b/docs/ru/operations/system-tables/storage_policies.md index 0f9a4814a92..6e9ed3b34d3 100644 --- a/docs/ru/operations/system-tables/storage_policies.md +++ b/docs/ru/operations/system-tables/storage_policies.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/storage_policies +--- # system.storage_policies {#system_tables-storage_policies} Содержит информацию о политиках хранения и томах, заданных в [конфигурации сервера](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). diff --git a/docs/ru/operations/system-tables/table_engines.md b/docs/ru/operations/system-tables/table_engines.md index b6f6d3decc2..d5b2bc8fc1f 100644 --- a/docs/ru/operations/system-tables/table_engines.md +++ b/docs/ru/operations/system-tables/table_engines.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/table_engines +--- # system.table_engines {#system-table-engines} Содержит информацию про движки таблиц, поддерживаемые сервером, а также об их возможностях. diff --git a/docs/ru/operations/system-tables/tables.md b/docs/ru/operations/system-tables/tables.md index ae5ca586a88..b007b250cfb 100644 --- a/docs/ru/operations/system-tables/tables.md +++ b/docs/ru/operations/system-tables/tables.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/tables +--- # system.tables {#system-tables} Содержит метаданные каждой таблицы, о которой знает сервер. diff --git a/docs/ru/operations/system-tables/text_log.md b/docs/ru/operations/system-tables/text_log.md index 4936edc663b..ea7764a3155 100644 --- a/docs/ru/operations/system-tables/text_log.md +++ b/docs/ru/operations/system-tables/text_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/text_log +--- # system.text_log {#system_tables-text_log} Содержит записи логов. Уровень логирования для таблицы может быть ограничен параметром сервера `text_log.level`. 
diff --git a/docs/ru/operations/system-tables/time_zones.md b/docs/ru/operations/system-tables/time_zones.md deleted file mode 120000 index d7b0f07d326..00000000000 --- a/docs/ru/operations/system-tables/time_zones.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/system-tables/time_zones.md \ No newline at end of file diff --git a/docs/ru/operations/system-tables/time_zones.mdx b/docs/ru/operations/system-tables/time_zones.mdx new file mode 100644 index 00000000000..1bc9fcd9a35 --- /dev/null +++ b/docs/ru/operations/system-tables/time_zones.mdx @@ -0,0 +1,9 @@ +--- +slug: /ru/operations/system-tables/time_zones +sidebar_label: time_zones +title: time_zones +--- + +import Content from '@site/docs/en/operations/system-tables/time_zones.md'; + +<Content /> diff --git a/docs/ru/operations/system-tables/trace_log.md b/docs/ru/operations/system-tables/trace_log.md index c43617ca7cf..f358fd48cf9 100644 --- a/docs/ru/operations/system-tables/trace_log.md +++ b/docs/ru/operations/system-tables/trace_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/trace_log +--- # system.trace_log {#system_tables-trace_log} Содержит экземпляры трассировки стека адресов вызова, собранные с помощью семплирующего профайлера запросов. diff --git a/docs/ru/operations/system-tables/users.md b/docs/ru/operations/system-tables/users.md index ba31382cc02..a1eae11fd7c 100644 --- a/docs/ru/operations/system-tables/users.md +++ b/docs/ru/operations/system-tables/users.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/users +--- # system.users {#system_tables-users} Содержит список [аккаунтов пользователей](../../operations/access-rights.md#user-account-management), настроенных на сервере. diff --git a/docs/ru/operations/system-tables/zookeeper.md b/docs/ru/operations/system-tables/zookeeper.md index a6ce62a9d4e..df0a5660c73 100644 --- a/docs/ru/operations/system-tables/zookeeper.md +++ b/docs/ru/operations/system-tables/zookeeper.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/zookeeper +--- # system.zookeeper {#system-zookeeper} Таблицы не существует, если ZooKeeper не сконфигурирован. Позволяет читать данные из ZooKeeper кластера, описанного в конфигурации. diff --git a/docs/ru/operations/system-tables/zookeeper_log.md b/docs/ru/operations/system-tables/zookeeper_log.md index 16f02cb0489..ccbdd5110ad 100644 --- a/docs/ru/operations/system-tables/zookeeper_log.md +++ b/docs/ru/operations/system-tables/zookeeper_log.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/system-tables/zookeeper_log +--- # system.zookeeper_log {#system-zookeeper_log} Эта таблица содержит информацию о параметрах запроса к серверу ZooKeeper и ответа от него. @@ -125,5 +128,5 @@ children: ['query-0000000006','query-0000000005','query-0000000004','que **См. также**
-- [ZooKeeper](../../operations/tips.md#zookeeper) +- [ZooKeeper](../../operations/tips.mdx#zookeeper) - [Руководство по ZooKeeper](https://zookeeper.apache.org/doc/r3.3.3/zookeeperProgrammers.html) diff --git a/docs/ru/operations/tips.md b/docs/ru/operations/tips.md deleted file mode 120000 index 9b3413bdbc3..00000000000 --- a/docs/ru/operations/tips.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/tips.md \ No newline at end of file diff --git a/docs/ru/operations/tips.mdx b/docs/ru/operations/tips.mdx new file mode 100644 index 00000000000..128466415ec --- /dev/null +++ b/docs/ru/operations/tips.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/operations/tips +sidebar_position: 58 +sidebar_label: Usage Recommendations +title: "Usage Recommendations" +--- + +import Content from '@site/docs/en/operations/tips.md'; + +<Content /> diff --git a/docs/ru/operations/troubleshooting.md b/docs/ru/operations/troubleshooting.md index ce7f9a4ab82..96a9ca5f5b9 100644 --- a/docs/ru/operations/troubleshooting.md +++ b/docs/ru/operations/troubleshooting.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/troubleshooting sidebar_position: 46 sidebar_label: "Устранение неисправностей" --- diff --git a/docs/ru/operations/update.md b/docs/ru/operations/update.md index 5feb6db6c9d..4ad4d8d942b 100644 --- a/docs/ru/operations/update.md +++ b/docs/ru/operations/update.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/update sidebar_position: 47 sidebar_label: "Обновление ClickHouse" --- diff --git a/docs/ru/operations/utilities/clickhouse-benchmark.md b/docs/ru/operations/utilities/clickhouse-benchmark.md index d8537991143..3018732de38 100644 --- a/docs/ru/operations/utilities/clickhouse-benchmark.md +++ b/docs/ru/operations/utilities/clickhouse-benchmark.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/utilities/clickhouse-benchmark sidebar_position: 61 sidebar_label: clickhouse-benchmark --- diff --git a/docs/ru/operations/utilities/clickhouse-compressor.md b/docs/ru/operations/utilities/clickhouse-compressor.md index d7f6862a62c..20d1f5e1cd4 100644 --- a/docs/ru/operations/utilities/clickhouse-compressor.md +++ b/docs/ru/operations/utilities/clickhouse-compressor.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/utilities/clickhouse-compressor +--- ## ClickHouse compressor Simple program for data compression and decompression in ClickHouse way.
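Because `tips.md` and `time_zones.md` are now `.mdx` wrappers, every cross-reference that pointed at the old files is retargeted to the new extension in the same commit, keeping anchors intact; otherwise the links would resolve to a deleted path. A representative before/after from the `zookeeper_log` page above:

```diff
-- [ZooKeeper](../../operations/tips.md#zookeeper)
+- [ZooKeeper](../../operations/tips.mdx#zookeeper)
```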
diff --git a/docs/ru/operations/utilities/clickhouse-copier.md b/docs/ru/operations/utilities/clickhouse-copier.md index 5cbcc05ba56..a712eb7b45b 100644 --- a/docs/ru/operations/utilities/clickhouse-copier.md +++ b/docs/ru/operations/utilities/clickhouse-copier.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/utilities/clickhouse-copier sidebar_position: 59 sidebar_label: clickhouse-copier --- diff --git a/docs/ru/operations/utilities/clickhouse-format.md b/docs/ru/operations/utilities/clickhouse-format.md index 994091c6f29..af66930b368 100644 --- a/docs/ru/operations/utilities/clickhouse-format.md +++ b/docs/ru/operations/utilities/clickhouse-format.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/utilities/clickhouse-format sidebar_position: 65 sidebar_label: clickhouse-format --- diff --git a/docs/ru/operations/utilities/clickhouse-local.md b/docs/ru/operations/utilities/clickhouse-local.md index e463c31eb0c..61fba2dd7cc 100644 --- a/docs/ru/operations/utilities/clickhouse-local.md +++ b/docs/ru/operations/utilities/clickhouse-local.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/utilities/clickhouse-local sidebar_position: 60 sidebar_label: clickhouse-local --- diff --git a/docs/ru/operations/utilities/clickhouse-obfuscator.md b/docs/ru/operations/utilities/clickhouse-obfuscator.md index ff1fdc70288..30cbadb7cfc 100644 --- a/docs/ru/operations/utilities/clickhouse-obfuscator.md +++ b/docs/ru/operations/utilities/clickhouse-obfuscator.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/utilities/clickhouse-obfuscator +--- # Обфускатор ClickHouse Простой инструмент для обфускации табличных данных. diff --git a/docs/ru/operations/utilities/index.md b/docs/ru/operations/utilities/index.md index 78fb03adc2c..b12d58a6d83 100644 --- a/docs/ru/operations/utilities/index.md +++ b/docs/ru/operations/utilities/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/operations/utilities/ sidebar_label: "Утилиты" sidebar_position: 56 --- diff --git a/docs/ru/operations/utilities/odbc-bridge.md b/docs/ru/operations/utilities/odbc-bridge.md index 39c796c10c1..4d52dda4e93 100644 --- a/docs/ru/operations/utilities/odbc-bridge.md +++ b/docs/ru/operations/utilities/odbc-bridge.md @@ -1,3 +1,6 @@ +--- +slug: /ru/operations/utilities/odbc-bridge +--- # clickhouse-odbc-bridge Simple HTTP-server which works like a proxy for ODBC driver. 
The main motivation diff --git a/docs/ru/sql-reference/aggregate-functions/combinators.md b/docs/ru/sql-reference/aggregate-functions/combinators.md index 8f681379909..3a7ff571f99 100644 --- a/docs/ru/sql-reference/aggregate-functions/combinators.md +++ b/docs/ru/sql-reference/aggregate-functions/combinators.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/combinators sidebar_position: 37 sidebar_label: "Комбинаторы агрегатных функций" --- diff --git a/docs/ru/sql-reference/aggregate-functions/index.md b/docs/ru/sql-reference/aggregate-functions/index.md index f4a13295a0c..c969b10422b 100644 --- a/docs/ru/sql-reference/aggregate-functions/index.md +++ b/docs/ru/sql-reference/aggregate-functions/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/ sidebar_label: "Агрегатные функции" sidebar_position: 33 --- diff --git a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md index 648a5716e2b..f7c7f98a8dd 100644 --- a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/parametric-functions sidebar_position: 38 sidebar_label: "Параметрические агрегатные функции" --- @@ -29,13 +30,13 @@ histogram(number_of_bins)(values) - [Массив](../../sql-reference/data-types/array.md) [кортежей](../../sql-reference/data-types/tuple.md) следующего вида: - ``` - [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] - ``` +``` +[(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] +``` - - `lower` — нижняя граница корзины. - - `upper` — верхняя граница корзины. - - `height` — количество значений в корзине. +- `lower` — нижняя граница корзины. +- `upper` — верхняя граница корзины. +- `height` — количество значений в корзине. **Пример** @@ -91,6 +92,7 @@ sequenceMatch(pattern)(timestamp, cond1, cond2, ...) :::danger "Предупреждение" События, произошедшие в одну и ту же секунду, располагаются в последовательности в неопределенном порядке, что может повлиять на результат работы функции. +::: **Аргументы** @@ -176,6 +178,7 @@ SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM :::danger "Предупреждение" События, произошедшие в одну и ту же секунду, располагаются в последовательности в неопределенном порядке, что может повлиять на результат работы функции. +::: ``` sql sequenceCount(pattern)(timestamp, cond1, cond2, ...) 
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/any.md b/docs/ru/sql-reference/aggregate-functions/reference/any.md index f0544099f6b..ec69a24886d 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/any.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/any.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/any sidebar_position: 6 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md b/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md index 60368b589bc..2845e5f56ed 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/anyheavy sidebar_position: 103 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/anylast.md b/docs/ru/sql-reference/aggregate-functions/reference/anylast.md index 62955cc6c18..6b1d5963d5c 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/anylast.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/anylast.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/anylast sidebar_position: 104 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/argmax.md b/docs/ru/sql-reference/aggregate-functions/reference/argmax.md index 31da594fc84..15beb0069a9 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/argmax.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/argmax.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/argmax sidebar_position: 106 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/argmin.md b/docs/ru/sql-reference/aggregate-functions/reference/argmin.md index 7b3683a23de..076f3a6985a 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/argmin.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/argmin.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/argmin sidebar_position: 105 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/avg.md b/docs/ru/sql-reference/aggregate-functions/reference/avg.md index f043e4a73d4..92a611daf0a 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/avg.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/avg.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/avg sidebar_position: 5 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md index faa1d60b134..4605d19cc19 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/avgweighted sidebar_position: 107 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md b/docs/ru/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md deleted file mode 120000 index 22a84e3e5d2..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/categoricalinformationvalue.mdx 
b/docs/ru/sql-reference/aggregate-functions/reference/categoricalinformationvalue.mdx new file mode 100644 index 00000000000..b8dc4847b26 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/categoricalinformationvalue.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/sql-reference/aggregate-functions/reference/categoricalinformationvalue +sidebar_position: 250 +sidebar_label: categoricalInformationValue +title: categoricalInformationValue +--- + +import Content from '@site/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md'; + +<Content /> diff --git a/docs/ru/sql-reference/aggregate-functions/reference/corr.md b/docs/ru/sql-reference/aggregate-functions/reference/corr.md index 96e8c17ccca..7871a04a4ff 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/corr.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/corr.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/corr sidebar_position: 107 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/count.md b/docs/ru/sql-reference/aggregate-functions/reference/count.md index 73bab0c56c2..78f37efd0c5 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/count.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/count.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/count sidebar_position: 1 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md b/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md index 02b0f575526..d1c96a9a8e3 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/covarpop sidebar_position: 36 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md index 72c7648822c..b04b2c1c89d 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/covarsamp sidebar_position: 37 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md b/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md index 49edc3932e0..3816fec9dce 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/deltasum.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/deltasum sidebar_position: 141 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/deltasumtimestamp.md b/docs/ru/sql-reference/aggregate-functions/reference/deltasumtimestamp.md index 3e0908a1de7..7be933d67d7 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/deltasumtimestamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/deltasumtimestamp.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/deltasumtimestamp sidebar_position: 141 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/entropy.md b/docs/ru/sql-reference/aggregate-functions/reference/entropy.md index 57d5792fdbe..eb7b11cdfe7 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/entropy.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/entropy.md @@ -1,4 +1,5 @@ --- +slug: 
/ru/sql-reference/aggregate-functions/reference/entropy sidebar_position: 302 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md b/docs/ru/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md index e072bce1255..863e193b6fd 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/exponentialmovingaverage sidebar_position: 108 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md index 0a0fe9bc341..15c8abdfa49 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/grouparray sidebar_position: 110 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md index 6279b046272..933ccd90ea9 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/grouparrayinsertat sidebar_position: 112 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md index eded1e948d4..03ef5c37561 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/grouparraymovingavg sidebar_position: 114 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md index 37223812279..bbe950356ab 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/grouparraymovingsum sidebar_position: 113 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparraysample.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparraysample.md index 106548b5e95..44275062e89 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparraysample.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparraysample.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/grouparraysample sidebar_position: 114 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md index 286fd9a2faf..f338d130314 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/groupbitand sidebar_position: 125 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md 
index 48586846fc2..395cfbb9925 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/groupbitmap sidebar_position: 128 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapand.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapand.md deleted file mode 120000 index 861ac2c3dd3..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapand.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/aggregate-functions/reference/groupbitmapand.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapand.mdx b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapand.mdx new file mode 100644 index 00000000000..c4f10a74020 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapand.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/sql-reference/aggregate-functions/reference/groupbitmapand +sidebar_position: 129 +sidebar_label: groupBitmapAnd +title: groupBitmapAnd +--- + +import Content from '@site/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md'; + +<Content /> diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapor.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapor.md deleted file mode 120000 index 0fcb851d0a4..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapor.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/aggregate-functions/reference/groupbitmapor.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapor.mdx b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapor.mdx new file mode 100644 index 00000000000..153f22ac496 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapor.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/sql-reference/aggregate-functions/reference/groupbitmapor +sidebar_position: 130 +sidebar_label: groupBitmapOr +title: groupBitmapOr +--- + +import Content from '@site/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md'; + +<Content /> diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapxor.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapxor.md deleted file mode 120000 index 13c79b37200..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapxor.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/aggregate-functions/reference/groupbitmapxor.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapxor.mdx b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapxor.mdx new file mode 100644 index 00000000000..56bcb9e7487 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmapxor.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/sql-reference/aggregate-functions/reference/groupbitmapxor +sidebar_position: 131 +sidebar_label: groupBitmapXor +title: groupBitmapXor +--- + +import Content from '@site/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md'; + +<Content /> diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md index a70c48dedbc..97a25f0e2bc 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md +++ 
b/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/groupbitor sidebar_position: 126 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md index 744eb3d51bf..226edda689d 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/groupbitxor sidebar_position: 127 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md b/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md index 36e2ba9ff24..5cf14351e8e 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/groupuniqarray sidebar_position: 111 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/index.md b/docs/ru/sql-reference/aggregate-functions/reference/index.md index fd43c4d49fb..aa75f98666b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/index.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/ sidebar_label: "Справочник" sidebar_position: 36 --- @@ -8,8 +9,8 @@ sidebar_position: 36 Стандартные агрегатные функции: - [count](../../../sql-reference/aggregate-functions/reference/count.md) -- [min](../../../sql-reference/aggregate-functions/reference/min.md) -- [max](../../../sql-reference/aggregate-functions/reference/max.md) +- [min](../../../sql-reference/aggregate-functions/reference/min.mdx) +- [max](../../../sql-reference/aggregate-functions/reference/max.mdx) - [sum](../../../sql-reference/aggregate-functions/reference/sum.md) - [avg](../../../sql-reference/aggregate-functions/reference/avg.md) - [any](../../../sql-reference/aggregate-functions/reference/any.md) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/intervalLengthSum.md b/docs/ru/sql-reference/aggregate-functions/reference/intervalLengthSum.md index 45b41bd13a3..dfe0ea7a4f3 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/intervalLengthSum.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/intervalLengthSum.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/intervalLengthSum sidebar_position: 146 sidebar_label: intervalLengthSum --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md b/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md index 49cb1120d34..0e090813320 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/kurtpop sidebar_position: 153 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md index 10d68e5188c..83782d69735 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/kurtsamp sidebar_position: 154 --- diff --git 
a/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md b/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md index ee5e1779754..0da00b320a0 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/mannwhitneyutest sidebar_position: 310 sidebar_label: mannWhitneyUTest --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/max.md b/docs/ru/sql-reference/aggregate-functions/reference/max.md deleted file mode 120000 index ae47679c80e..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/max.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/aggregate-functions/reference/max.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/max.mdx b/docs/ru/sql-reference/aggregate-functions/reference/max.mdx new file mode 100644 index 00000000000..2aa35ba049c --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/max.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/sql-reference/aggregate-functions/reference/max +sidebar_position: 3 +sidebar_label: max +title: max +--- + +import Content from '@site/docs/en/sql-reference/aggregate-functions/reference/max.md'; + +<Content /> diff --git a/docs/ru/sql-reference/aggregate-functions/reference/maxmap.md b/docs/ru/sql-reference/aggregate-functions/reference/maxmap.md index 8e24e936b0d..8cf7f7c1a86 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/maxmap.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/maxmap.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/maxmap sidebar_position: 143 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/median.md b/docs/ru/sql-reference/aggregate-functions/reference/median.md index 0c4b0db12c5..8b84ffeb65b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/median.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/median.md @@ -1,3 +1,6 @@ +--- +slug: /ru/sql-reference/aggregate-functions/reference/median +--- # median {#median} Функции `median*` — синонимы для соответствущих функций `quantile*`. Они вычисляют медиану числовой последовательности.
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/min.md b/docs/ru/sql-reference/aggregate-functions/reference/min.md deleted file mode 120000 index 61417b347a8..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/min.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/aggregate-functions/reference/min.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/min.mdx b/docs/ru/sql-reference/aggregate-functions/reference/min.mdx new file mode 100644 index 00000000000..8a7f74b7b7a --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/min.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/sql-reference/aggregate-functions/reference/min +sidebar_position: 2 +sidebar_label: min +title: min +--- + +import Content from '@site/docs/en/sql-reference/aggregate-functions/reference/min.md'; + +<Content /> diff --git a/docs/ru/sql-reference/aggregate-functions/reference/minmap.md b/docs/ru/sql-reference/aggregate-functions/reference/minmap.md index e4474a15ce1..bc4603fcaa7 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/minmap.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/minmap.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/minmap sidebar_position: 142 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantile.md b/docs/ru/sql-reference/aggregate-functions/reference/quantile.md index 9ec59a94f07..43e95898f85 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantile.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantile.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/quantile sidebar_position: 200 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md index 09767be257c..3ca14e4c6d7 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantilebfloat16.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/quantilebfloat16 sidebar_position: 209 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md index 76d41bfb779..f9da1665a1e 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/quantiledeterministic sidebar_position: 206 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md index 1ebaa672f17..eea66eb2a7b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/quantileexact sidebar_position: 202 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md index b4adf373ef5..b91ef75dfcf 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md @@ -1,4 +1,5 @@ --- +slug:
/ru/sql-reference/aggregate-functions/reference/quantileexactweighted sidebar_position: 203 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md index f40527b15dc..fed0f8b328b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/quantiles sidebar_position: 201 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md index b3780c5896e..1db2eb6e2ef 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/quantiletdigest sidebar_position: 207 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md index 98e38996e14..ef1abeaa26c 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted sidebar_position: 208 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md index 7481b133d17..d9752f757fc 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/quantiletiming sidebar_position: 204 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md index dd928bf9ae2..7cd97ddf239 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/quantiletimingweighted sidebar_position: 205 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md b/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md index a5e75302be2..a00ed391e96 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/rankCorr sidebar_position: 145 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md b/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md index f60f5ca7d60..2b5158d4270 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/simplelinearregression sidebar_position: 220 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md b/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md index 
ceb13e3a1fb..6b4d5633458 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/skewpop sidebar_position: 150 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md index e8fd74854a7..a7037dd65bb 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/skewsamp sidebar_position: 151 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md b/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md index 5ed7f7b11d8..7a9fc033542 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/sparkbar sidebar_position: 311 sidebar_label: sparkbar --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md b/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md index 20ffb4c0dc2..bb6b43e716c 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/stddevpop sidebar_position: 30 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md index c1a652541a4..c8048f2d3dc 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/stddevsamp sidebar_position: 31 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md index 6c92cd73ad6..b9672be7ab9 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/stochasticlinearregression sidebar_position: 221 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md index 3e48427ce12..7f676f01c4e 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression sidebar_position: 222 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md b/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md index 3eb54dd4b09..017cd598b02 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/studentttest sidebar_position: 300 sidebar_label: 
studentTTest --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sum.md b/docs/ru/sql-reference/aggregate-functions/reference/sum.md index e4fdf466ee7..0ab1370a3d9 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/sum.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/sum.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/sum sidebar_position: 4 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sumcount.md b/docs/ru/sql-reference/aggregate-functions/reference/sumcount.md index 69108bfc51d..f62317b865e 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/sumcount.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/sumcount.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/sumcount sidebar_position: 144 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sumkahan.md b/docs/ru/sql-reference/aggregate-functions/reference/sumkahan.md index f165c2a5d1c..f92fddf23d9 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/sumkahan.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/sumkahan.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/sumkahan sidebar_position: 145 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/summap.md b/docs/ru/sql-reference/aggregate-functions/reference/summap.md index 17728e47bb1..fb25698f69e 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/summap.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/summap.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/summap sidebar_position: 141 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md b/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md index ffaca94512a..da9ab7912ed 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/sumwithoverflow sidebar_position: 140 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/topk.md b/docs/ru/sql-reference/aggregate-functions/reference/topk.md index 972f7adc924..270db6a3ebb 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/topk.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/topk.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/topk sidebar_position: 108 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md index 433167bbe5f..29cb2e01f7b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/topkweighted sidebar_position: 109 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniq.md b/docs/ru/sql-reference/aggregate-functions/reference/uniq.md index 56a5ea33d67..3e933307c03 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniq.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniq.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/uniq sidebar_position: 190 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md 
b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md index fff6e3504f6..1fa30876391 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/uniqcombined sidebar_position: 192 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md index 7fb9e66cd57..fd0ceae754f 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/uniqcombined64 sidebar_position: 193 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md index b07754dfe2d..6f97b6b8346 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/uniqexact sidebar_position: 191 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md index efe7c035ec6..471848f75a4 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/uniqhll12 sidebar_position: 194 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqthetasketch.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqthetasketch.md deleted file mode 120000 index ac61a28677f..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqthetasketch.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/aggregate-functions/reference/uniqthetasketch.md \ No newline at end of file diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqthetasketch.mdx b/docs/ru/sql-reference/aggregate-functions/reference/uniqthetasketch.mdx new file mode 100644 index 00000000000..f420cf63bce --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqthetasketch.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/sql-reference/aggregate-functions/reference/uniqthetasketch +sidebar_position: 195 +sidebar_label: uniqTheta +title: uniqTheta +--- + +import Content from '@site/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md'; + +<Content /> diff --git a/docs/ru/sql-reference/aggregate-functions/reference/varpop.md b/docs/ru/sql-reference/aggregate-functions/reference/varpop.md index 41ff49487c9..ba1719151f2 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/varpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/varpop.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/varpop sidebar_position: 32 --- diff --git a/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md index 644c48446da..d81b94d1b13 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/varsamp sidebar_position: 33 --- diff
--git a/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md b/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md index f6b26a58e69..6953750edb4 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/aggregate-functions/reference/welchttest sidebar_position: 301 sidebar_label: welchTTest --- diff --git a/docs/ru/sql-reference/ansi.md b/docs/ru/sql-reference/ansi.md deleted file mode 120000 index 3cf6bffed67..00000000000 --- a/docs/ru/sql-reference/ansi.md +++ /dev/null @@ -1 +0,0 @@ -../../en/sql-reference/ansi.md \ No newline at end of file diff --git a/docs/ru/sql-reference/ansi.mdx b/docs/ru/sql-reference/ansi.mdx new file mode 100644 index 00000000000..3fcdeeda773 --- /dev/null +++ b/docs/ru/sql-reference/ansi.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/sql-reference/ansi +sidebar_position: 40 +sidebar_label: ANSI Compatibility +title: "ANSI Compatibility" +--- + +import Content from '@site/docs/en/sql-reference/ansi.md'; + +<Content /> diff --git a/docs/ru/sql-reference/data-types/aggregatefunction.md b/docs/ru/sql-reference/data-types/aggregatefunction.md index f177f1a98b7..21b452acb1d 100644 --- a/docs/ru/sql-reference/data-types/aggregatefunction.md +++ b/docs/ru/sql-reference/data-types/aggregatefunction.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/aggregatefunction sidebar_position: 53 sidebar_label: AggregateFunction --- diff --git a/docs/ru/sql-reference/data-types/array.md b/docs/ru/sql-reference/data-types/array.md index 27f4bf94a64..0b1d0f6aa95 100644 --- a/docs/ru/sql-reference/data-types/array.md +++ b/docs/ru/sql-reference/data-types/array.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/array sidebar_position: 52 sidebar_label: Array(T) --- diff --git a/docs/ru/sql-reference/data-types/boolean.md b/docs/ru/sql-reference/data-types/boolean.md index 621d3550eaa..c63b7f41118 100644 --- a/docs/ru/sql-reference/data-types/boolean.md +++ b/docs/ru/sql-reference/data-types/boolean.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/boolean sidebar_position: 43 sidebar_label: "Булевы значения" --- diff --git a/docs/ru/sql-reference/data-types/date.md b/docs/ru/sql-reference/data-types/date.md index 46f73bc8cb7..7254b82f461 100644 --- a/docs/ru/sql-reference/data-types/date.md +++ b/docs/ru/sql-reference/data-types/date.md @@ -1,11 +1,12 @@ --- +slug: /ru/sql-reference/data-types/date sidebar_position: 47 sidebar_label: Date --- # Date {#data-type-date} -Дата. Хранится в двух байтах в виде (беззнакового) числа дней, прошедших от 1970-01-01. Позволяет хранить значения от чуть больше, чем начала unix-эпохи до верхнего порога, определяющегося константой на этапе компиляции (сейчас - до 2106 года, последний полностью поддерживаемый год - 2105). +Дата. Хранится в двух байтах в виде (беззнакового) числа дней, прошедших от 1970-01-01. Позволяет хранить значения от чуть больше, чем начала unix-эпохи до верхнего порога, определяющегося константой на этапе компиляции (сейчас - до 2149 года, последний полностью поддерживаемый год - 2148). Диапазон значений: \[1970-01-01, 2149-06-06\].
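The new upper bound of `Date` follows directly from its two-byte storage; a minimal sanity check under default settings (out-of-range behavior remains implementation-specific, as the changelog notes):

```sql
-- Date is a UInt16 count of days since 1970-01-01,
-- so the last representable day is 1970-01-01 + 65535 days = 2149-06-06.
SELECT
    toDate('2149-06-06')         AS max_date,
    toDate('1970-01-01') + 65535 AS same_max_date;  -- adding an integer to a Date adds days
```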
diff --git a/docs/ru/sql-reference/data-types/date32.md b/docs/ru/sql-reference/data-types/date32.md index 1fc5ff6e5e2..fcb7d688c20 100644 --- a/docs/ru/sql-reference/data-types/date32.md +++ b/docs/ru/sql-reference/data-types/date32.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/date32 sidebar_position: 48 sidebar_label: Date32 --- diff --git a/docs/ru/sql-reference/data-types/datetime.md b/docs/ru/sql-reference/data-types/datetime.md index eeffa8501b2..6fb58c447fd 100644 --- a/docs/ru/sql-reference/data-types/datetime.md +++ b/docs/ru/sql-reference/data-types/datetime.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/datetime sidebar_position: 48 sidebar_label: DateTime --- diff --git a/docs/ru/sql-reference/data-types/datetime64.md b/docs/ru/sql-reference/data-types/datetime64.md index 0473d8256e9..da2f81f4828 100644 --- a/docs/ru/sql-reference/data-types/datetime64.md +++ b/docs/ru/sql-reference/data-types/datetime64.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/datetime64 sidebar_position: 49 sidebar_label: DateTime64 --- diff --git a/docs/ru/sql-reference/data-types/decimal.md b/docs/ru/sql-reference/data-types/decimal.md index 9bb03d895df..d3ae0cb5be6 100644 --- a/docs/ru/sql-reference/data-types/decimal.md +++ b/docs/ru/sql-reference/data-types/decimal.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/decimal sidebar_position: 42 sidebar_label: Decimal --- diff --git a/docs/ru/sql-reference/data-types/domains/index.md b/docs/ru/sql-reference/data-types/domains/index.md index 24937fb99f0..68723b18e2a 100644 --- a/docs/ru/sql-reference/data-types/domains/index.md +++ b/docs/ru/sql-reference/data-types/domains/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/domains/ sidebar_label: "Домены" sidebar_position: 56 --- diff --git a/docs/ru/sql-reference/data-types/domains/ipv4.md b/docs/ru/sql-reference/data-types/domains/ipv4.md index 786f77d649d..028b6d9318e 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv4.md +++ b/docs/ru/sql-reference/data-types/domains/ipv4.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/domains/ipv4 sidebar_position: 59 sidebar_label: IPv4 --- diff --git a/docs/ru/sql-reference/data-types/domains/ipv6.md b/docs/ru/sql-reference/data-types/domains/ipv6.md index c6d590d8f87..27cca3a46f1 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv6.md +++ b/docs/ru/sql-reference/data-types/domains/ipv6.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/domains/ipv6 sidebar_position: 60 sidebar_label: IPv6 --- diff --git a/docs/ru/sql-reference/data-types/enum.md b/docs/ru/sql-reference/data-types/enum.md index 82736aa221c..2b24b9b183c 100644 --- a/docs/ru/sql-reference/data-types/enum.md +++ b/docs/ru/sql-reference/data-types/enum.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/enum sidebar_position: 50 sidebar_label: Enum --- diff --git a/docs/ru/sql-reference/data-types/fixedstring.md b/docs/ru/sql-reference/data-types/fixedstring.md index fcea42c9e38..0752180e6c9 100644 --- a/docs/ru/sql-reference/data-types/fixedstring.md +++ b/docs/ru/sql-reference/data-types/fixedstring.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/fixedstring sidebar_position: 45 sidebar_label: FixedString(N) --- diff --git a/docs/ru/sql-reference/data-types/float.md b/docs/ru/sql-reference/data-types/float.md index c234369c60f..5c8c703d166 100644 --- a/docs/ru/sql-reference/data-types/float.md +++ b/docs/ru/sql-reference/data-types/float.md @@ -1,4 +1,5 @@ --- +slug: 
/ru/sql-reference/data-types/float sidebar_position: 41 sidebar_label: Float32, Float64 --- diff --git a/docs/ru/sql-reference/data-types/geo.md b/docs/ru/sql-reference/data-types/geo.md index 9ff91d373b0..954886e4dda 100644 --- a/docs/ru/sql-reference/data-types/geo.md +++ b/docs/ru/sql-reference/data-types/geo.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/geo sidebar_position: 62 sidebar_label: Географические структуры --- diff --git a/docs/ru/sql-reference/data-types/index.md b/docs/ru/sql-reference/data-types/index.md index 45b30e256eb..ce1d19e8229 100644 --- a/docs/ru/sql-reference/data-types/index.md +++ b/docs/ru/sql-reference/data-types/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/ sidebar_label: "Типы данных" sidebar_position: 37 --- diff --git a/docs/ru/sql-reference/data-types/int-uint.md b/docs/ru/sql-reference/data-types/int-uint.md index 06d6727afa4..9c30214a85e 100644 --- a/docs/ru/sql-reference/data-types/int-uint.md +++ b/docs/ru/sql-reference/data-types/int-uint.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/int-uint sidebar_position: 40 sidebar_label: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 --- diff --git a/docs/ru/sql-reference/data-types/lowcardinality.md b/docs/ru/sql-reference/data-types/lowcardinality.md index 2b9abd0ab2d..b6d16f86d8b 100644 --- a/docs/ru/sql-reference/data-types/lowcardinality.md +++ b/docs/ru/sql-reference/data-types/lowcardinality.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/lowcardinality sidebar_position: 51 sidebar_label: LowCardinality --- diff --git a/docs/ru/sql-reference/data-types/map.md b/docs/ru/sql-reference/data-types/map.md index 010e27d8477..ddf9cca43bb 100644 --- a/docs/ru/sql-reference/data-types/map.md +++ b/docs/ru/sql-reference/data-types/map.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/map sidebar_position: 65 sidebar_label: Map(key, value) --- diff --git a/docs/ru/sql-reference/data-types/multiword-types.md b/docs/ru/sql-reference/data-types/multiword-types.md index 60e73263199..22d118bbef8 100644 --- a/docs/ru/sql-reference/data-types/multiword-types.md +++ b/docs/ru/sql-reference/data-types/multiword-types.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/multiword-types sidebar_position: 61 sidebar_label: Составные типы --- diff --git a/docs/ru/sql-reference/data-types/nested-data-structures/index.md b/docs/ru/sql-reference/data-types/nested-data-structures/index.md index 9a7f35e2f19..50c0d1b54cf 100644 --- a/docs/ru/sql-reference/data-types/nested-data-structures/index.md +++ b/docs/ru/sql-reference/data-types/nested-data-structures/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/nested-data-structures/ sidebar_label: "Вложенные структуры данных" sidebar_position: 54 --- diff --git a/docs/ru/sql-reference/data-types/nested-data-structures/nested.md b/docs/ru/sql-reference/data-types/nested-data-structures/nested.md index db957e57502..4ec8333d563 100644 --- a/docs/ru/sql-reference/data-types/nested-data-structures/nested.md +++ b/docs/ru/sql-reference/data-types/nested-data-structures/nested.md @@ -1,3 +1,6 @@ +--- +slug: /ru/sql-reference/data-types/nested-data-structures/nested +--- # Nested {#nested} ## Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} diff --git a/docs/ru/sql-reference/data-types/nullable.md b/docs/ru/sql-reference/data-types/nullable.md index 31a3674af6b..f6d6bb1f9c6 100644 --- a/docs/ru/sql-reference/data-types/nullable.md +++ 
b/docs/ru/sql-reference/data-types/nullable.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/nullable sidebar_position: 55 sidebar_label: Nullable --- diff --git a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md index 773cbe16b99..41f073f8153 100644 --- a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md @@ -1,3 +1,6 @@ +--- +slug: /ru/sql-reference/data-types/simpleaggregatefunction +--- # SimpleAggregateFunction(func, type) {#data-type-simpleaggregatefunction} Хранит только текущее значение агрегатной функции и не сохраняет ее полное состояние, как это делает [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md). Такая оптимизация может быть применена к функциям, которые обладают следующим свойством: результат выполнения функции `f` к набору строк `S1 UNION ALL S2` может быть получен путем выполнения `f` к отдельным частям набора строк, @@ -9,8 +12,8 @@ - [`any`](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any) - [`anyLast`](../../sql-reference/aggregate-functions/reference/anylast.md#anylastx) -- [`min`](../../sql-reference/aggregate-functions/reference/min.md#agg_function-min) -- [`max`](../../sql-reference/aggregate-functions/reference/max.md#agg_function-max) +- [`min`](../../sql-reference/aggregate-functions/reference/min.mdx#agg_function-min) +- [`max`](../../sql-reference/aggregate-functions/reference/max.mdx#agg_function-max) - [`sum`](../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum) - [`sumWithOverflow`](../../sql-reference/aggregate-functions/reference/sumwithoverflow.md#sumwithoverflowx) - [`groupBitAnd`](../../sql-reference/aggregate-functions/reference/groupbitand.md#groupbitand) diff --git a/docs/ru/sql-reference/data-types/special-data-types/expression.md b/docs/ru/sql-reference/data-types/special-data-types/expression.md index f2bc08872d2..379aa4cb0bf 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/expression.md +++ b/docs/ru/sql-reference/data-types/special-data-types/expression.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/special-data-types/expression sidebar_position: 58 sidebar_label: Expression --- diff --git a/docs/ru/sql-reference/data-types/special-data-types/index.md b/docs/ru/sql-reference/data-types/special-data-types/index.md index d266ac644b6..4339df6148d 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/index.md +++ b/docs/ru/sql-reference/data-types/special-data-types/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/special-data-types/ sidebar_label: "Служебные типы данных" sidebar_position: 55 --- diff --git a/docs/ru/sql-reference/data-types/special-data-types/interval.md b/docs/ru/sql-reference/data-types/special-data-types/interval.md index d557cc4aac3..856275ed8f2 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/interval.md +++ b/docs/ru/sql-reference/data-types/special-data-types/interval.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/special-data-types/interval sidebar_position: 61 sidebar_label: Interval --- diff --git a/docs/ru/sql-reference/data-types/special-data-types/nothing.md b/docs/ru/sql-reference/data-types/special-data-types/nothing.md index 0448223d6b9..65388f8af75 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/nothing.md +++ b/docs/ru/sql-reference/data-types/special-data-types/nothing.md @@ -1,4 +1,5 
@@ --- +slug: /ru/sql-reference/data-types/special-data-types/nothing sidebar_position: 60 sidebar_label: Nothing --- diff --git a/docs/ru/sql-reference/data-types/special-data-types/set.md b/docs/ru/sql-reference/data-types/special-data-types/set.md index 13d4effaf95..049d512a6bc 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/set.md +++ b/docs/ru/sql-reference/data-types/special-data-types/set.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/special-data-types/set sidebar_position: 59 sidebar_label: Set --- diff --git a/docs/ru/sql-reference/data-types/string.md b/docs/ru/sql-reference/data-types/string.md index 3f567f6c86a..2032ee9ff09 100644 --- a/docs/ru/sql-reference/data-types/string.md +++ b/docs/ru/sql-reference/data-types/string.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/string sidebar_position: 44 sidebar_label: String --- diff --git a/docs/ru/sql-reference/data-types/tuple.md b/docs/ru/sql-reference/data-types/tuple.md index d1f9af8d6ed..76370d01c0d 100644 --- a/docs/ru/sql-reference/data-types/tuple.md +++ b/docs/ru/sql-reference/data-types/tuple.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/tuple sidebar_position: 54 sidebar_label: Tuple(T1, T2, ...) --- diff --git a/docs/ru/sql-reference/data-types/uuid.md b/docs/ru/sql-reference/data-types/uuid.md index 57e3cb77ee0..f83c72489ca 100644 --- a/docs/ru/sql-reference/data-types/uuid.md +++ b/docs/ru/sql-reference/data-types/uuid.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/data-types/uuid sidebar_position: 46 sidebar_label: UUID --- diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md index e19f9d6745d..acdccdf032a 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical sidebar_position: 45 sidebar_label: "Иерархические словари" --- diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index 35c8cc0842a..26bd379b8f3 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout sidebar_position: 41 sidebar_label: "Хранение словарей в памяти" --- @@ -444,6 +445,7 @@ LAYOUT(CACHE(SIZE_IN_CELLS 1000000000)) :::danger "Warning" Не используйте в качестве источника ClickHouse, поскольку он медленно обрабатывает запросы со случайным чтением. 
+::: ### complex_key_cache {#complex-key-cache} diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index d821a2eeb99..08d12a1c598 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime sidebar_position: 42 sidebar_label: "Обновление словарей" --- diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md index 22366682a75..4dbab00db76 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon sidebar_position: 46 sidebar_label: Cловари полигонов --- diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index a80fedfbb24..2aaae6b7075 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources sidebar_position: 43 sidebar_label: "Источники внешних словарей" --- diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index b1b37b5ebac..8c01b8295bf 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure sidebar_position: 44 sidebar_label: "Ключ и поля словаря" --- diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md index 6a59504e840..91510f0d450 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict sidebar_position: 40 sidebar_label: "Настройка внешнего словаря" --- diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md index 8df6945f87e..0d9d1cf1130 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/external-dictionaries/external-dicts sidebar_position: 39 sidebar_label: "Внешние словари" --- diff --git 
a/docs/ru/sql-reference/dictionaries/external-dictionaries/index.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/index.md index 733076fe4ce..d8c2cf46682 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/index.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/external-dictionaries/ sidebar_label: "Внешние словари" sidebar_position: 37 --- diff --git a/docs/ru/sql-reference/dictionaries/index.md b/docs/ru/sql-reference/dictionaries/index.md index 41b730a3264..905e8751765 100644 --- a/docs/ru/sql-reference/dictionaries/index.md +++ b/docs/ru/sql-reference/dictionaries/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/ sidebar_label: "Словари" sidebar_position: 35 --- diff --git a/docs/ru/sql-reference/dictionaries/internal-dicts.md b/docs/ru/sql-reference/dictionaries/internal-dicts.md index d7393b30b59..462aca4206b 100644 --- a/docs/ru/sql-reference/dictionaries/internal-dicts.md +++ b/docs/ru/sql-reference/dictionaries/internal-dicts.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/dictionaries/internal-dicts sidebar_position: 39 sidebar_label: "Встроенные словари" --- diff --git a/docs/ru/sql-reference/distributed-ddl.md b/docs/ru/sql-reference/distributed-ddl.md index 08021a8641b..862fb1f1f97 100644 --- a/docs/ru/sql-reference/distributed-ddl.md +++ b/docs/ru/sql-reference/distributed-ddl.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/distributed-ddl sidebar_position: 32 sidebar_label: "Распределенные DDL запросы" --- diff --git a/docs/ru/sql-reference/functions/arithmetic-functions.md b/docs/ru/sql-reference/functions/arithmetic-functions.md index ba4340093b4..bc1d0a55128 100644 --- a/docs/ru/sql-reference/functions/arithmetic-functions.md +++ b/docs/ru/sql-reference/functions/arithmetic-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/arithmetic-functions sidebar_position: 34 sidebar_label: "Арифметические функции" --- diff --git a/docs/ru/sql-reference/functions/array-functions.md b/docs/ru/sql-reference/functions/array-functions.md index dcc539a3594..bb2716b2741 100644 --- a/docs/ru/sql-reference/functions/array-functions.md +++ b/docs/ru/sql-reference/functions/array-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/array-functions sidebar_position: 35 sidebar_label: "Массивы" --- @@ -17,9 +18,10 @@ empty([x]) Массив считается пустым, если он не содержит ни одного элемента. - :::note "Примечание" +:::note "Примечание" Функцию можно оптимизировать, если включить настройку [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). При `optimize_functions_to_subcolumns = 1` функция читает только подстолбец [size0](../../sql-reference/data-types/array.md#array-size) вместо чтения и обработки всего столбца массива. Запрос `SELECT empty(arr) FROM TABLE` преобразуется к запросу `SELECT arr.size0 = 0 FROM TABLE`. - ::: +::: + Функция также поддерживает работу с типами [String](string-functions.md#empty) и [UUID](uuid-functions.md#empty). **Параметры** @@ -60,9 +62,10 @@ notEmpty([x]) Массив считается непустым, если он содержит хотя бы один элемент. - :::note "Примечание" +:::note "Примечание" Функцию можно оптимизировать, если включить настройку [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns). 
При `optimize_functions_to_subcolumns = 1` функция читает только подстолбец [size0](../../sql-reference/data-types/array.md#array-size) вместо чтения и обработки всего столбца массива. Запрос `SELECT notEmpty(arr) FROM table` преобразуется к запросу `SELECT arr.size0 != 0 FROM TABLE`. - ::: +::: + Функция также поддерживает работу с типами [String](string-functions.md#notempty) и [UUID](uuid-functions.md#notempty). **Параметры** @@ -689,9 +692,10 @@ SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; └─────────┘ ``` - :::note "Примечание" +:::note "Примечание" Для улучшения эффективности сортировки применяется [преобразование Шварца](https://ru.wikipedia.org/wiki/%D0%9F%D1%80%D0%B5%D0%BE%D0%B1%D1%80%D0%B0%D0%B7%D0%BE%D0%B2%D0%B0%D0%BD%D0%B8%D0%B5_%D0%A8%D0%B2%D0%B0%D1%80%D1%86%D0%B0). - ::: +::: + ## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort} Возвращает массив `arr`, отсортированный в нисходящем порядке. Если указана функция `func`, то массив `arr` сначала сортируется в порядке, который определяется функцией `func`, а затем отсортированный массив переворачивается. Если функция `func` принимает несколько аргументов, то в функцию `arrayReverseSort` необходимо передавать несколько массивов, которые будут соответствовать аргументам функции `func`. Подробные примеры рассмотрены в конце описания функции `arrayReverseSort`. diff --git a/docs/ru/sql-reference/functions/array-join.md b/docs/ru/sql-reference/functions/array-join.md index 330a22f6553..38464012090 100644 --- a/docs/ru/sql-reference/functions/array-join.md +++ b/docs/ru/sql-reference/functions/array-join.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/array-join sidebar_position: 61 sidebar_label: "Функция ArrayJoin" --- diff --git a/docs/ru/sql-reference/functions/bit-functions.md b/docs/ru/sql-reference/functions/bit-functions.md index efe2cb4969f..f7dc1ab5bc2 100644 --- a/docs/ru/sql-reference/functions/bit-functions.md +++ b/docs/ru/sql-reference/functions/bit-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/bit-functions sidebar_position: 48 sidebar_label: "Битовые функции" --- diff --git a/docs/ru/sql-reference/functions/bitmap-functions.md b/docs/ru/sql-reference/functions/bitmap-functions.md index ff0589381ea..c32c532ea45 100644 --- a/docs/ru/sql-reference/functions/bitmap-functions.md +++ b/docs/ru/sql-reference/functions/bitmap-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/bitmap-functions sidebar_position: 49 sidebar_label: "Функции для битмапов" --- diff --git a/docs/ru/sql-reference/functions/comparison-functions.md b/docs/ru/sql-reference/functions/comparison-functions.md index 34b5f55530a..62ad58fa5f8 100644 --- a/docs/ru/sql-reference/functions/comparison-functions.md +++ b/docs/ru/sql-reference/functions/comparison-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/comparison-functions sidebar_position: 36 sidebar_label: "Функции сравнения" --- diff --git a/docs/ru/sql-reference/functions/conditional-functions.md b/docs/ru/sql-reference/functions/conditional-functions.md index d7c6a454d3d..24f42c4fb84 100644 --- a/docs/ru/sql-reference/functions/conditional-functions.md +++ b/docs/ru/sql-reference/functions/conditional-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/conditional-functions sidebar_position: 43 sidebar_label: "Условные функции" --- diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index c9371e361bc..cfffcfb6021 
100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/date-time-functions sidebar_position: 39 sidebar_label: "Функции для работы с датами и временем" --- @@ -266,9 +267,25 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp; └────────────────┘ ``` - :::note "Attention" - `Date` или `DateTime` это возвращаемый тип функций `toStartOf*`, который описан ниже. Несмотря на то, что эти функции могут принимать `DateTime64` в качестве аргумента, если переданное значение типа `DateTime64` выходит за пределы нормального диапазона (с 1900 по 2299 год), то это даст неверный результат. - ::: +:::note "Attention" +Тип возвращаемого описанными далее функциями `toStartOf*`, `toLastDayOfMonth`, `toMonday` значения - `Date` или `DateTime`. +Хотя эти функции могут принимать значения типа `Date32` или `DateTime64` в качестве аргумента, при обработке аргумента вне нормального диапазона значений (`1970` - `2148` для `Date` и `1970-01-01 00:00:00`-`2106-02-07 08:28:15` для `DateTime`) будет получен некорректный результат. +Возвращаемые значения для аргументов вне нормального диапазона: +* `1970-01-01 (00:00:00)` будет возвращён для моментов времени до 1970 года, +* `2106-02-07 08:28:15` будет взят в качестве аргумента, если полученный аргумент превосходит данное значение и возвращаемый тип - `DateTime`, +* `2149-06-06` будет взят в качестве аргумента, если полученный аргумент превосходит данное значение и возвращаемый тип - `Date`, +* `2149-05-31` будет результатом функции `toLastDayOfMonth` при обработке аргумента больше `2149-05-31`. +::: + ## toStartOfYear {#tostartofyear} Округляет дату или дату-с-временем вниз до первого дня года. @@ -302,19 +319,23 @@ SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101; Округляет дату или дату-с-временем вниз до первого дня месяца. Возвращается дата. - :::note "Attention" - Возвращаемое значение для некорректных дат зависит от реализации. ClickHouse может вернуть нулевую дату, выбросить исключение, или выполнить «естественное» перетекание дат между месяцами. - ::: +## toLastDayOfMonth + +Округляет дату или дату-с-временем до последнего числа месяца. +Возвращается дата. + ## toMonday {#tomonday} Округляет дату или дату-с-временем вниз до ближайшего понедельника. +Частный случай: для дат `1970-01-01`, `1970-01-02`, `1970-01-03` и `1970-01-04` результатом будет `1970-01-01`. Возвращается дата. ## toStartOfWeek(t[,mode]) {#tostartofweek} Округляет дату или дату со временем до ближайшего воскресенья или понедельника в соответствии с mode. Возвращается дата.
-Аргумент mode работает точно так же, как аргумент mode [toWeek()](#toweek). Если аргумент mode опущен, то используется режим 0. +Частный случай: для дат `1970-01-01`, `1970-01-02`, `1970-01-03` и `1970-01-04` (и `1970-01-05`, если `mode` равен `1`) результатом будет `1970-01-01`. +Аргумент `mode` работает точно так же, как аргумент mode [toWeek()](#toweek). Если аргумент mode опущен, то используется режим 0. ## toStartOfDay {#tostartofday} @@ -931,6 +952,13 @@ SELECT now('Europe/Moscow'); └──────────────────────┘ ``` +## nowInBlock {#nowinblock} + +Возвращает текущую дату и время в момент обработки блока данных. В отличие от функции `now`, возвращаемое значение не является константой: при долгих запросах в разных блоках данных будут возвращаться различные значения. + +Имеет смысл использовать данную функцию для получения текущего времени при длительных запросах INSERT SELECT. + + ## today {#today} Возвращает текущую дату на момент выполнения запроса. Функция не требует аргументов. @@ -948,7 +976,7 @@ SELECT now('Europe/Moscow'); ## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size} Для интервала, начинающегося в `StartTime` и длящегося `Duration` секунд, возвращает массив моментов времени, кратных `Size`. Параметр `Size` указывать необязательно, по умолчанию он равен 1800 секундам (30 минутам). -Данная функция может использоваться, например, для анализа количества просмотров страницы за соответствующую сессию. +Данная функция может использоваться, например, для анализа количества просмотров страницы за соответствующую сессию. Аргумент `StartTime` может иметь тип `DateTime` или `DateTime64`. В случае, если используется `DateTime`, аргументы `Duration` и `Size` должны иметь тип `UInt32`; для `DateTime64` они должны быть типа `Decimal64`. Возвращает массив DateTime/DateTime64 (тип будет совпадать с типом параметра ’StartTime’). Для DateTime64 масштаб (scale) возвращаемой величины может отличаться от масштаба аргумента ’StartTime’ --- результат будет иметь наибольший масштаб среди всех данных аргументов. diff --git a/docs/ru/sql-reference/functions/encoding-functions.md b/docs/ru/sql-reference/functions/encoding-functions.md index 65d2b0e6538..675bf1cbf9f 100644 --- a/docs/ru/sql-reference/functions/encoding-functions.md +++ b/docs/ru/sql-reference/functions/encoding-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/encoding-functions sidebar_position: 52 sidebar_label: "Функции кодирования" --- @@ -153,9 +154,10 @@ SELECT hex(toFloat64(number)) AS hex_presentation FROM numbers(15, 2); Если вы хотите преобразовать результат в число, вы можете использовать функции [reverse](../../sql-reference/functions/string-functions.md#reverse) и [reinterpretAs<Type>](../../sql-reference/functions/type-conversion-functions.md#type-conversion-functions).
- :::note "Примечание" - Если `unbin` вызывается из клиента `clickhouse-client`, бинарная строка возвращается в кодировке UTF-8. - ::: +:::note "Примечание" +Если `unbin` вызывается из клиента `clickhouse-client`, бинарная строка возвращается в кодировке UTF-8. +::: + Поддерживает двоичные цифры `0` и `1`. Количество двоичных цифр не обязательно должно быть кратно восьми. Если строка аргумента содержит что-либо, кроме двоичных цифр, возвращается некоторый результат, определенный реализацией (ошибки не возникает). **Аргументы** diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index d85affe7b68..b0324b75a88 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/encryption-functions sidebar_position: 67 sidebar_label: "Функции для шифрования" --- diff --git a/docs/ru/sql-reference/functions/ext-dict-functions.md b/docs/ru/sql-reference/functions/ext-dict-functions.md index 4468267d832..9651ad52a76 100644 --- a/docs/ru/sql-reference/functions/ext-dict-functions.md +++ b/docs/ru/sql-reference/functions/ext-dict-functions.md @@ -1,11 +1,13 @@ --- +slug: /ru/sql-reference/functions/ext-dict-functions sidebar_position: 58 sidebar_label: "Функции для работы с внешними словарями" --- - :::note "Внимание" +:::note "Внимание" Для словарей, созданных с помощью [DDL-запросов](../../sql-reference/statements/create/dictionary.md), в параметре `dict_name` указывается полное имя словаря вместе с базой данных, например: `<database>.<dict_name>`. Если база данных не указана, используется текущая. - ::: +::: + # Функции для работы с внешними словарями {#ext_dict_functions} Информацию о подключении и настройке внешних словарей смотрите в разделе [Внешние словари](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
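To illustrate the note above about DDL-created dictionaries: the first argument of `dictGet` may carry the database qualifier. A hypothetical sketch (`db.users_dict` and its `name` attribute are illustrative assumptions, not part of this patch):

```sql
-- Fully qualified dictionary name: <database>.<dict_name>.
-- Without the qualifier, the current database is used.
SELECT dictGet('db.users_dict', 'name', toUInt64(42)) AS user_name;
```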
diff --git a/docs/ru/sql-reference/functions/files.md b/docs/ru/sql-reference/functions/files.md index ef4369870ed..13b526c9fb3 100644 --- a/docs/ru/sql-reference/functions/files.md +++ b/docs/ru/sql-reference/functions/files.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/files sidebar_position: 43 sidebar_label: "Функции для работы с файлами" --- diff --git a/docs/ru/sql-reference/functions/functions-for-nulls.md b/docs/ru/sql-reference/functions/functions-for-nulls.md index 5c84de20204..2ea4cff7231 100644 --- a/docs/ru/sql-reference/functions/functions-for-nulls.md +++ b/docs/ru/sql-reference/functions/functions-for-nulls.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/functions-for-nulls sidebar_position: 63 sidebar_label: "Функции для работы с Nullable-аргументами" --- diff --git a/docs/ru/sql-reference/functions/geo/coordinates.md b/docs/ru/sql-reference/functions/geo/coordinates.md index 748ca3b1cad..3fc2d602dd4 100644 --- a/docs/ru/sql-reference/functions/geo/coordinates.md +++ b/docs/ru/sql-reference/functions/geo/coordinates.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/geo/coordinates sidebar_label: "Функции для работы с географическими координатами" sidebar_position: 62 --- diff --git a/docs/ru/sql-reference/functions/geo/geohash.md b/docs/ru/sql-reference/functions/geo/geohash.md index 933775dcfbe..634c7b0d3df 100644 --- a/docs/ru/sql-reference/functions/geo/geohash.md +++ b/docs/ru/sql-reference/functions/geo/geohash.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/geo/geohash sidebar_label: "Функции для работы с системой Geohash" --- diff --git a/docs/ru/sql-reference/functions/geo/h3.md b/docs/ru/sql-reference/functions/geo/h3.md index b36e3f64acb..91e4639d3fe 100644 --- a/docs/ru/sql-reference/functions/geo/h3.md +++ b/docs/ru/sql-reference/functions/geo/h3.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/geo/h3 sidebar_label: "Функции для работы с индексами H3" --- diff --git a/docs/ru/sql-reference/functions/geo/index.md b/docs/ru/sql-reference/functions/geo/index.md index cc954bd4f00..e46ebc364ec 100644 --- a/docs/ru/sql-reference/functions/geo/index.md +++ b/docs/ru/sql-reference/functions/geo/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/geo/ sidebar_position: 62 sidebar_label: "Гео-данные" --- \ No newline at end of file diff --git a/docs/ru/sql-reference/functions/geo/s2.md b/docs/ru/sql-reference/functions/geo/s2.md index 282d9a9d7be..85b4ad08003 100644 --- a/docs/ru/sql-reference/functions/geo/s2.md +++ b/docs/ru/sql-reference/functions/geo/s2.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/geo/s2 sidebar_label: "Функции для работы с индексами S2" --- diff --git a/docs/ru/sql-reference/functions/hash-functions.md b/docs/ru/sql-reference/functions/hash-functions.md index 0065275519b..98b6d8d4b17 100644 --- a/docs/ru/sql-reference/functions/hash-functions.md +++ b/docs/ru/sql-reference/functions/hash-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/hash-functions sidebar_position: 50 sidebar_label: "Функции хэширования" --- diff --git a/docs/ru/sql-reference/functions/in-functions.md b/docs/ru/sql-reference/functions/in-functions.md index c1ef8b7dacd..8e3482d9297 100644 --- a/docs/ru/sql-reference/functions/in-functions.md +++ b/docs/ru/sql-reference/functions/in-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/in-functions sidebar_position: 60 sidebar_label: "Функции для реализации оператора IN" --- diff --git 
a/docs/ru/sql-reference/functions/index.md b/docs/ru/sql-reference/functions/index.md index 9bde1f2e56c..bcf3789fe52 100644 --- a/docs/ru/sql-reference/functions/index.md +++ b/docs/ru/sql-reference/functions/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/ sidebar_label: "Функции" sidebar_position: 32 --- diff --git a/docs/ru/sql-reference/functions/introspection.md b/docs/ru/sql-reference/functions/introspection.md index ce3c86c5943..657b2e0499b 100644 --- a/docs/ru/sql-reference/functions/introspection.md +++ b/docs/ru/sql-reference/functions/introspection.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/introspection sidebar_position: 65 sidebar_label: "Функции интроспекции" --- @@ -9,6 +10,7 @@ sidebar_label: "Функции интроспекции" :::danger "Предупреждение" Эти функции выполняются медленно и могут приводить к нежелательным последствиям в плане безопасности. +::: Для правильной работы функций интроспекции: diff --git a/docs/ru/sql-reference/functions/ip-address-functions.md b/docs/ru/sql-reference/functions/ip-address-functions.md index 428fd3dff31..96d4b737c88 100644 --- a/docs/ru/sql-reference/functions/ip-address-functions.md +++ b/docs/ru/sql-reference/functions/ip-address-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/ip-address-functions sidebar_position: 55 sidebar_label: "Функции для работы с IP-адресами" --- diff --git a/docs/ru/sql-reference/functions/json-functions.md b/docs/ru/sql-reference/functions/json-functions.md index 7656e49b2fd..53ab638165a 100644 --- a/docs/ru/sql-reference/functions/json-functions.md +++ b/docs/ru/sql-reference/functions/json-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/json-functions sidebar_position: 56 sidebar_label: JSON --- @@ -360,9 +361,10 @@ SELECT JSON_EXISTS('{"hello":["world"]}', '$.hello[*]'); SELECT JSON_EXISTS('{"hello":["world"]}', '$.hello[0]'); ``` - :::note "Примечание" +:::note "Примечание" до версии 21.11 порядок аргументов функции был обратный, т.е. JSON_EXISTS(path, json) - ::: +::: + ## JSON_QUERY(json, path) {#json-query} Парсит JSON и извлекает значение как JSON массив или JSON объект. @@ -386,9 +388,10 @@ SELECT toTypeName(JSON_QUERY('{"hello":2}', '$.hello')); [2] String ``` - :::note "Примечание" +:::note "Примечание" до версии 21.11 порядок аргументов функции был обратный, т.е. JSON_QUERY(path, json) - ::: +::: + ## JSON_VALUE(json, path) {#json-value} Парсит JSON и извлекает значение как JSON скаляр. @@ -413,9 +416,10 @@ world String ``` - :::note "Примечание" +:::note "Примечание" до версии 21.11 порядок аргументов функции был обратный, т.е. JSON_VALUE(path, json) - ::: +::: + ## toJSONString {#tojsonstring} Сериализует значение в JSON представление. Поддерживаются различные типы данных и вложенные структуры. 
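The three notes patched in the json-functions hunk above all record the same 21.11 change: the argument order flipped to JSON document first, JSONPath second. A sketch that exercises the new order against the outputs quoted in the hunk (`1`, `[2]`, `world`), assuming a 21.11+ server:

``` sql
SELECT
    JSON_EXISTS('{"hello":["world"]}', '$.hello[0]') AS has_element,   -- 1
    JSON_QUERY('{"hello":2}', '$.hello')             AS json_fragment, -- [2]
    JSON_VALUE('{"hello":"world"}', '$.hello')       AS scalar_value;  -- world
```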
diff --git a/docs/ru/sql-reference/functions/logical-functions.md b/docs/ru/sql-reference/functions/logical-functions.md index ed313c744d3..cec605944bf 100644 --- a/docs/ru/sql-reference/functions/logical-functions.md +++ b/docs/ru/sql-reference/functions/logical-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/logical-functions sidebar_position: 37 sidebar_label: "Логические функции" --- diff --git a/docs/ru/sql-reference/functions/machine-learning-functions.md b/docs/ru/sql-reference/functions/machine-learning-functions.md index dfe6d7146f5..805557a720b 100644 --- a/docs/ru/sql-reference/functions/machine-learning-functions.md +++ b/docs/ru/sql-reference/functions/machine-learning-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/machine-learning-functions sidebar_position: 64 sidebar_label: "Функции машинного обучения" --- diff --git a/docs/ru/sql-reference/functions/math-functions.md b/docs/ru/sql-reference/functions/math-functions.md index 37a4226a1f9..59733d111ba 100644 --- a/docs/ru/sql-reference/functions/math-functions.md +++ b/docs/ru/sql-reference/functions/math-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/math-functions sidebar_position: 44 sidebar_label: "Математические функции" --- diff --git a/docs/ru/sql-reference/functions/nlp-functions.md b/docs/ru/sql-reference/functions/nlp-functions.md index d115b8365e7..5d6540871d5 100644 --- a/docs/ru/sql-reference/functions/nlp-functions.md +++ b/docs/ru/sql-reference/functions/nlp-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/nlp-functions sidebar_position: 67 sidebar_label: NLP --- @@ -7,6 +8,7 @@ sidebar_label: NLP :::danger "Предупреждение" Сейчас использование функций для работы с естественным языком является экспериментальной возможностью. Чтобы использовать данные функции, включите настройку `allow_experimental_nlp_functions = 1`. +::: ## stem {#stem} @@ -129,4 +131,4 @@ SELECT synonyms('list', 'important'); en/ -``` \ No newline at end of file +``` diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index bfe0d3d463b..5e89a4f1236 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/other-functions sidebar_position: 66 sidebar_label: "Прочие функции" --- @@ -2020,9 +2021,10 @@ countDigits(x) Тип: [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges). - :::note "Примечание" +:::note "Примечание" Для `Decimal` значений учитывается их масштаб: вычисляется результат по базовому целочисленному типу, полученному как `(value * scale)`. Например: `countDigits(42) = 2`, `countDigits(42.000) = 5`, `countDigits(0.04200) = 4`. То есть вы можете проверить десятичное переполнение для `Decimal64` с помощью `countDigits(x) > 18`. Это медленный вариант [isDecimalOverflow](#is-decimal-overflow).
- ::: +::: + **Пример** Запрос: diff --git a/docs/ru/sql-reference/functions/random-functions.md b/docs/ru/sql-reference/functions/random-functions.md index b09ca684713..b5c59b89d4e 100644 --- a/docs/ru/sql-reference/functions/random-functions.md +++ b/docs/ru/sql-reference/functions/random-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/random-functions sidebar_position: 51 sidebar_label: "Функции генерации псевдослучайных чисел" --- diff --git a/docs/ru/sql-reference/functions/rounding-functions.md b/docs/ru/sql-reference/functions/rounding-functions.md index a3ba1071d3e..97d40745115 100644 --- a/docs/ru/sql-reference/functions/rounding-functions.md +++ b/docs/ru/sql-reference/functions/rounding-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/rounding-functions sidebar_position: 45 sidebar_label: "Функции округления" --- diff --git a/docs/ru/sql-reference/functions/splitting-merging-functions.md b/docs/ru/sql-reference/functions/splitting-merging-functions.md index 354d1d336f6..26dadc2c3ac 100644 --- a/docs/ru/sql-reference/functions/splitting-merging-functions.md +++ b/docs/ru/sql-reference/functions/splitting-merging-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/splitting-merging-functions sidebar_position: 47 sidebar_label: "Функции разбиения и слияния строк и массивов" --- diff --git a/docs/ru/sql-reference/functions/string-functions.md b/docs/ru/sql-reference/functions/string-functions.md index c6e6fbdce9a..0ab34153edb 100644 --- a/docs/ru/sql-reference/functions/string-functions.md +++ b/docs/ru/sql-reference/functions/string-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/string-functions sidebar_position: 40 sidebar_label: "Функции для работы со строками" --- diff --git a/docs/ru/sql-reference/functions/string-replace-functions.md b/docs/ru/sql-reference/functions/string-replace-functions.md index 14c49c19e64..d75eead2f94 100644 --- a/docs/ru/sql-reference/functions/string-replace-functions.md +++ b/docs/ru/sql-reference/functions/string-replace-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/string-replace-functions sidebar_position: 42 sidebar_label: "Функции поиска и замены в строках" --- diff --git a/docs/ru/sql-reference/functions/string-search-functions.md b/docs/ru/sql-reference/functions/string-search-functions.md index 7b8f6ae537d..4da4365ce3c 100644 --- a/docs/ru/sql-reference/functions/string-search-functions.md +++ b/docs/ru/sql-reference/functions/string-search-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/string-search-functions sidebar_position: 41 sidebar_label: "Функции поиска в строках" --- @@ -27,9 +28,10 @@ position(needle IN haystack) Алиас: `locate(haystack, needle[, start_pos])`. - :::note "Примечание" +:::note "Примечание" Синтаксис `position(needle IN haystack)` обеспечивает совместимость с SQL, функция работает так же, как `position(haystack, needle)`. - ::: +::: + **Аргументы** - `haystack` — строка, по которой выполняется поиск. [Строка](../syntax.md#syntax-string-literal). @@ -327,9 +329,10 @@ Result: Для поиска без учета регистра и/или в кодировке UTF-8 используйте функции `multiSearchAnyCaseInsensitive, multiSearchAnyUTF8, multiSearchAnyCaseInsensitiveUTF8`. - :::note "Примечание" +:::note "Примечание" Во всех функциях `multiSearch*` количество needles должно быть меньше 2<sup>8</sup> из-за особенностей реализации.
- ::: +::: + ## match(haystack, pattern) {#matchhaystack-pattern} Проверка строки на соответствие регулярному выражению pattern. Регулярное выражение **re2**. Синтаксис регулярных выражений **re2** является более ограниченным по сравнению с регулярными выражениями **Perl** ([подробнее](https://github.com/google/re2/wiki/Syntax)). @@ -344,9 +347,9 @@ Result: То же, что и `match`, но возвращает ноль, если ни одно регулярное выражение не подошло и один, если хотя бы одно. Используется библиотека [hyperscan](https://github.com/intel/hyperscan) для соответствия регулярных выражений. Для шаблонов на поиск многих подстрок в строке, лучше используйте `multiSearchAny`, так как она работает существенно быстрее. - :::note "Примечание" +:::note "Примечание" Длина любой строки из `haystack` должна быть меньше 2<sup>32</sup> байт, иначе бросается исключение. Это ограничение связано с ограничением hyperscan API. - ::: +::: ## multiMatchAnyIndex(haystack, \[pattern<sub>1</sub>, pattern<sub>2</sub>, …, pattern<sub>n</sub>\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn} То же, что и `multiMatchAny`, только возвращает любой индекс подходящего регулярного выражения. @@ -367,12 +370,13 @@ Result: То же, что и `multiFuzzyMatchAny`, только возвращает массив всех индексов всех подходящих регулярных выражений в любом порядке в пределах константного редакционного расстояния. - :::note "Примечание" +:::note "Примечание" `multiFuzzyMatch*` функции не поддерживают UTF-8 закодированные регулярные выражения, и такие выражения рассматриваются как байтовые из-за ограничения hyperscan. - ::: - :::note "Примечание" +::: + +:::note "Примечание" Чтобы выключить все функции, использующие hyperscan, используйте настройку `SET allow_hyperscan = 0;`. - ::: +::: ## extract(haystack, pattern) {#extracthaystack-pattern} Извлечение фрагмента строки по регулярному выражению. Если haystack не соответствует регулярному выражению pattern, то возвращается пустая строка. Если регулярное выражение не содержит subpattern-ов, то вынимается фрагмент, который подпадает под всё регулярное выражение. Иначе вынимается фрагмент, который подпадает под первый subpattern. @@ -385,9 +389,10 @@ Result: Разбирает строку `haystack` на фрагменты, соответствующие группам регулярного выражения `pattern`. Возвращает массив массивов, где первый массив содержит все фрагменты, соответствующие первой группе регулярного выражения, второй массив - соответствующие второй группе, и т.д. - :::note "Замечание" +:::note "Замечание" Функция `extractAllGroupsHorizontal` работает медленнее, чем функция [extractAllGroupsVertical](#extractallgroups-vertical). - ::: +::: + **Синтаксис** ``` sql @@ -556,9 +561,10 @@ SELECT * FROM Months WHERE ilike(name, '%j%'); Для поиска без учета регистра и/или в формате UTF-8 используйте функции `ngramSearchCaseInsensitive, ngramSearchUTF8, ngramSearchCaseInsensitiveUTF8`. - :::note "Примечание" +:::note "Примечание" Для случая UTF-8 мы используем триграммное расстояние. Вычисление n-граммного расстояния не совсем честное. Мы используем 2-х байтные хэши для хэширования n-грамм, а затем вычисляем (не)симметрическую разность между хэш таблицами – могут возникнуть коллизии. В формате UTF-8 без учета регистра мы не используем честную функцию `tolower` – мы обнуляем 5-й бит (нумерация с нуля) каждого байта кодовой точки, а также первый бит нулевого байта, если байтов больше 1 – это работает для латиницы и почти для всех кириллических букв.
- ::: +::: + ## countMatches(haystack, pattern) {#countmatcheshaystack-pattern} Возвращает количество совпадений, найденных в строке `haystack`, для регулярного выражения `pattern`. diff --git a/docs/ru/sql-reference/functions/tuple-functions.md b/docs/ru/sql-reference/functions/tuple-functions.md index 6b860b2fceb..c702e5d00b1 100644 --- a/docs/ru/sql-reference/functions/tuple-functions.md +++ b/docs/ru/sql-reference/functions/tuple-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/tuple-functions sidebar_position: 68 sidebar_label: Функции для работы с кортежами --- diff --git a/docs/ru/sql-reference/functions/tuple-map-functions.md b/docs/ru/sql-reference/functions/tuple-map-functions.md index c4099c00f93..da8909f537c 100644 --- a/docs/ru/sql-reference/functions/tuple-map-functions.md +++ b/docs/ru/sql-reference/functions/tuple-map-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/tuple-map-functions sidebar_position: 46 sidebar_label: Работа с контейнерами map --- diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 7635bda78e6..0f6f61173b6 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/type-conversion-functions sidebar_position: 38 sidebar_label: "Функции преобразования типов" --- @@ -684,9 +685,10 @@ x::t - Преобразованное значение. - :::note "Примечание" - Если входное значение выходит за границы нового типа, то результат переполняется. Например, `CAST(-1, 'UInt8')` возвращает `255`. - ::: +:::note "Примечание" +Если входное значение выходит за границы нового типа, то результат переполняется. Например, `CAST(-1, 'UInt8')` возвращает `255`. 
+::: + **Примеры** Запрос: diff --git a/docs/ru/sql-reference/functions/url-functions.md b/docs/ru/sql-reference/functions/url-functions.md index cfac83901ae..34bb88f4991 100644 --- a/docs/ru/sql-reference/functions/url-functions.md +++ b/docs/ru/sql-reference/functions/url-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/url-functions sidebar_position: 54 sidebar_label: "Функции для работы с URL" --- diff --git a/docs/ru/sql-reference/functions/uuid-functions.md b/docs/ru/sql-reference/functions/uuid-functions.md index 554e78002b8..65d13079ee8 100644 --- a/docs/ru/sql-reference/functions/uuid-functions.md +++ b/docs/ru/sql-reference/functions/uuid-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/uuid-functions sidebar_position: 53 sidebar_label: "Функции для работы с UUID" --- diff --git a/docs/ru/sql-reference/functions/ym-dict-functions.md b/docs/ru/sql-reference/functions/ym-dict-functions.md index 15f684318eb..7b9363dd3db 100644 --- a/docs/ru/sql-reference/functions/ym-dict-functions.md +++ b/docs/ru/sql-reference/functions/ym-dict-functions.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/functions/ym-dict-functions sidebar_position: 59 sidebar_label: "Функции для работы со словарями Яндекс.Метрики" --- diff --git a/docs/ru/sql-reference/index.md b/docs/ru/sql-reference/index.md index 5c50dccb567..f55c5e859f1 100644 --- a/docs/ru/sql-reference/index.md +++ b/docs/ru/sql-reference/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/ sidebar_label: "Справка по SQL" sidebar_position: 28 --- diff --git a/docs/ru/sql-reference/operators/exists.md b/docs/ru/sql-reference/operators/exists.md index 0cf60df6694..3fc085fe021 100644 --- a/docs/ru/sql-reference/operators/exists.md +++ b/docs/ru/sql-reference/operators/exists.md @@ -1,3 +1,6 @@ +--- +slug: /ru/sql-reference/operators/exists +--- # EXISTS {#exists-operator} Оператор `EXISTS` проверяет, сколько строк содержит результат выполнения подзапроса. Если результат пустой, то оператор возвращает `0`. В остальных случаях оператор возвращает `1`. diff --git a/docs/ru/sql-reference/operators/in.md b/docs/ru/sql-reference/operators/in.md index e7087eb6274..2b3d87a877f 100644 --- a/docs/ru/sql-reference/operators/in.md +++ b/docs/ru/sql-reference/operators/in.md @@ -1,3 +1,6 @@ +--- +slug: /ru/sql-reference/operators/in +--- # Операторы IN {#select-in-operators} Операторы `IN`, `NOT IN`, `GLOBAL IN`, `GLOBAL NOT IN` рассматриваются отдельно, так как их функциональность достаточно богатая. 
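Two behavioral notes in the hunks above are cheap to verify directly. A sketch assuming any recent server, with the expected values taken from the quoted documentation (`255` for the out-of-range CAST; `1` for a non-empty and `0` for an empty EXISTS subquery):

``` sql
-- An out-of-range CAST wraps instead of raising an error.
SELECT CAST(-1, 'UInt8') AS wrapped;  -- 255

-- EXISTS is 1 when the subquery returns at least one row, 0 otherwise.
SELECT
    EXISTS(SELECT * FROM numbers(10) WHERE number > 8)  AS non_empty, -- 1
    EXISTS(SELECT * FROM numbers(10) WHERE number > 11) AS no_rows;   -- 0
```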
diff --git a/docs/ru/sql-reference/operators/index.md b/docs/ru/sql-reference/operators/index.md index c9a835ec81b..57c426cb5ad 100644 --- a/docs/ru/sql-reference/operators/index.md +++ b/docs/ru/sql-reference/operators/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/operators/ sidebar_position: 38 sidebar_label: "Операторы" --- diff --git a/docs/ru/sql-reference/statements/alter/column.md b/docs/ru/sql-reference/statements/alter/column.md index 6bbee5479ce..c337b64f1d6 100644 --- a/docs/ru/sql-reference/statements/alter/column.md +++ b/docs/ru/sql-reference/statements/alter/column.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/column sidebar_position: 37 sidebar_label: "Манипуляции со столбцами" --- diff --git a/docs/ru/sql-reference/statements/alter/comment.md b/docs/ru/sql-reference/statements/alter/comment.md index 6d2edcfd5a7..727af15d03e 100644 --- a/docs/ru/sql-reference/statements/alter/comment.md +++ b/docs/ru/sql-reference/statements/alter/comment.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/comment sidebar_position: 51 sidebar_label: COMMENT --- diff --git a/docs/ru/sql-reference/statements/alter/constraint.md b/docs/ru/sql-reference/statements/alter/constraint.md index b24507de0b8..cb380bc6a60 100644 --- a/docs/ru/sql-reference/statements/alter/constraint.md +++ b/docs/ru/sql-reference/statements/alter/constraint.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/constraint sidebar_position: 43 sidebar_label: "Манипуляции с ограничениями" --- diff --git a/docs/ru/sql-reference/statements/alter/delete.md b/docs/ru/sql-reference/statements/alter/delete.md index 40ba8e634e9..e01470f4939 100644 --- a/docs/ru/sql-reference/statements/alter/delete.md +++ b/docs/ru/sql-reference/statements/alter/delete.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/delete sidebar_position: 39 sidebar_label: DELETE --- diff --git a/docs/ru/sql-reference/statements/alter/index.md b/docs/ru/sql-reference/statements/alter/index.md index 0191c794e9c..b48c55c869e 100644 --- a/docs/ru/sql-reference/statements/alter/index.md +++ b/docs/ru/sql-reference/statements/alter/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/ sidebar_position: 35 sidebar_label: ALTER --- diff --git a/docs/ru/sql-reference/statements/alter/index/index.md b/docs/ru/sql-reference/statements/alter/index/index.md index f73005b1bfe..64ba5f7df2c 100644 --- a/docs/ru/sql-reference/statements/alter/index/index.md +++ b/docs/ru/sql-reference/statements/alter/index/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/ toc_hidden_folder: true sidebar_position: 42 sidebar_label: "Манипуляции с индексами" diff --git a/docs/ru/sql-reference/statements/alter/order-by.md b/docs/ru/sql-reference/statements/alter/order-by.md index be3ab0064ab..11f6954acc2 100644 --- a/docs/ru/sql-reference/statements/alter/order-by.md +++ b/docs/ru/sql-reference/statements/alter/order-by.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/order-by sidebar_position: 41 sidebar_label: ORDER BY --- diff --git a/docs/ru/sql-reference/statements/alter/partition.md b/docs/ru/sql-reference/statements/alter/partition.md index aecf954a45a..95d02c062bd 100644 --- a/docs/ru/sql-reference/statements/alter/partition.md +++ b/docs/ru/sql-reference/statements/alter/partition.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/partition sidebar_position: 38 sidebar_label: PARTITION --- diff --git 
a/docs/ru/sql-reference/statements/alter/projection.md b/docs/ru/sql-reference/statements/alter/projection.md index cba77f45e85..63b068611ec 100644 --- a/docs/ru/sql-reference/statements/alter/projection.md +++ b/docs/ru/sql-reference/statements/alter/projection.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/projection sidebar_position: 49 sidebar_label: PROJECTION --- diff --git a/docs/ru/sql-reference/statements/alter/quota.md b/docs/ru/sql-reference/statements/alter/quota.md index 408b6ff5458..709baea6af0 100644 --- a/docs/ru/sql-reference/statements/alter/quota.md +++ b/docs/ru/sql-reference/statements/alter/quota.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/quota sidebar_position: 46 sidebar_label: QUOTA --- diff --git a/docs/ru/sql-reference/statements/alter/role.md b/docs/ru/sql-reference/statements/alter/role.md index 311901a5574..a86ff780b8d 100644 --- a/docs/ru/sql-reference/statements/alter/role.md +++ b/docs/ru/sql-reference/statements/alter/role.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/role sidebar_position: 46 sidebar_label: ROLE --- diff --git a/docs/ru/sql-reference/statements/alter/row-policy.md b/docs/ru/sql-reference/statements/alter/row-policy.md index e66e3d9f39f..1944d3c0d7d 100644 --- a/docs/ru/sql-reference/statements/alter/row-policy.md +++ b/docs/ru/sql-reference/statements/alter/row-policy.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/row-policy sidebar_position: 47 sidebar_label: ROW POLICY --- diff --git a/docs/ru/sql-reference/statements/alter/sample-by.md b/docs/ru/sql-reference/statements/alter/sample-by.md index ea07deddfc1..ca3cb93d12b 100644 --- a/docs/ru/sql-reference/statements/alter/sample-by.md +++ b/docs/ru/sql-reference/statements/alter/sample-by.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/sample-by sidebar_position: 41 sidebar_label: SAMPLE BY --- diff --git a/docs/ru/sql-reference/statements/alter/setting.md b/docs/ru/sql-reference/statements/alter/setting.md index 8744314f8b4..5eba971fae1 100644 --- a/docs/ru/sql-reference/statements/alter/setting.md +++ b/docs/ru/sql-reference/statements/alter/setting.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/setting sidebar_position: 38 sidebar_label: SETTING --- diff --git a/docs/ru/sql-reference/statements/alter/settings-profile.md b/docs/ru/sql-reference/statements/alter/settings-profile.md index c5d0269d38d..ec1cd1f72e6 100644 --- a/docs/ru/sql-reference/statements/alter/settings-profile.md +++ b/docs/ru/sql-reference/statements/alter/settings-profile.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/settings-profile sidebar_position: 48 sidebar_label: SETTINGS PROFILE --- diff --git a/docs/ru/sql-reference/statements/alter/ttl.md b/docs/ru/sql-reference/statements/alter/ttl.md index 54004b820c5..855a651ffc2 100644 --- a/docs/ru/sql-reference/statements/alter/ttl.md +++ b/docs/ru/sql-reference/statements/alter/ttl.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/ttl sidebar_position: 44 sidebar_label: TTL --- diff --git a/docs/ru/sql-reference/statements/alter/update.md b/docs/ru/sql-reference/statements/alter/update.md index 23c6d1b65b0..2ff8ebff8d4 100644 --- a/docs/ru/sql-reference/statements/alter/update.md +++ b/docs/ru/sql-reference/statements/alter/update.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/update sidebar_position: 40 sidebar_label: UPDATE --- diff --git a/docs/ru/sql-reference/statements/alter/user.md 
b/docs/ru/sql-reference/statements/alter/user.md index 398849de22d..1d6ad34ebed 100644 --- a/docs/ru/sql-reference/statements/alter/user.md +++ b/docs/ru/sql-reference/statements/alter/user.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/user sidebar_position: 45 sidebar_label: USER --- diff --git a/docs/ru/sql-reference/statements/alter/view.md b/docs/ru/sql-reference/statements/alter/view.md index df2c5ba3f33..2d4823bba3a 100644 --- a/docs/ru/sql-reference/statements/alter/view.md +++ b/docs/ru/sql-reference/statements/alter/view.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/alter/view sidebar_position: 50 sidebar_label: VIEW --- diff --git a/docs/ru/sql-reference/statements/attach.md b/docs/ru/sql-reference/statements/attach.md index dbfd6317c4d..4d06b79f0cd 100644 --- a/docs/ru/sql-reference/statements/attach.md +++ b/docs/ru/sql-reference/statements/attach.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/attach sidebar_position: 40 sidebar_label: ATTACH --- diff --git a/docs/ru/sql-reference/statements/check-table.md b/docs/ru/sql-reference/statements/check-table.md index b874b4fc325..77d246b631e 100644 --- a/docs/ru/sql-reference/statements/check-table.md +++ b/docs/ru/sql-reference/statements/check-table.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/check-table sidebar_position: 41 sidebar_label: CHECK --- diff --git a/docs/ru/sql-reference/statements/create/database.md b/docs/ru/sql-reference/statements/create/database.md index c5bbab43cc0..ae9a119e537 100644 --- a/docs/ru/sql-reference/statements/create/database.md +++ b/docs/ru/sql-reference/statements/create/database.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/database sidebar_position: 35 sidebar_label: "База данных" --- diff --git a/docs/ru/sql-reference/statements/create/dictionary.md b/docs/ru/sql-reference/statements/create/dictionary.md index 8829417adb2..e228c4fad4a 100644 --- a/docs/ru/sql-reference/statements/create/dictionary.md +++ b/docs/ru/sql-reference/statements/create/dictionary.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/dictionary sidebar_position: 38 sidebar_label: "Словарь" --- diff --git a/docs/ru/sql-reference/statements/create/function.md b/docs/ru/sql-reference/statements/create/function.md index 9be15272893..dd5e27be751 100644 --- a/docs/ru/sql-reference/statements/create/function.md +++ b/docs/ru/sql-reference/statements/create/function.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/function sidebar_position: 38 sidebar_label: FUNCTION --- diff --git a/docs/ru/sql-reference/statements/create/index.md b/docs/ru/sql-reference/statements/create/index.md index 7d76c747750..492a70ec06d 100644 --- a/docs/ru/sql-reference/statements/create/index.md +++ b/docs/ru/sql-reference/statements/create/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/ sidebar_label: CREATE sidebar_position: 34 --- diff --git a/docs/ru/sql-reference/statements/create/quota.md b/docs/ru/sql-reference/statements/create/quota.md index a38281f5f13..18eba6b5b1a 100644 --- a/docs/ru/sql-reference/statements/create/quota.md +++ b/docs/ru/sql-reference/statements/create/quota.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/quota sidebar_position: 42 sidebar_label: "Квота" --- diff --git a/docs/ru/sql-reference/statements/create/role.md b/docs/ru/sql-reference/statements/create/role.md index de0e2424077..9e06ad1914e 100644 --- a/docs/ru/sql-reference/statements/create/role.md +++ 
b/docs/ru/sql-reference/statements/create/role.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/role sidebar_position: 40 sidebar_label: "Роль" --- diff --git a/docs/ru/sql-reference/statements/create/row-policy.md b/docs/ru/sql-reference/statements/create/row-policy.md index c1d4f6c95ea..aab2091344d 100644 --- a/docs/ru/sql-reference/statements/create/row-policy.md +++ b/docs/ru/sql-reference/statements/create/row-policy.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/row-policy sidebar_position: 41 sidebar_label: "Политика доступа" --- diff --git a/docs/ru/sql-reference/statements/create/settings-profile.md b/docs/ru/sql-reference/statements/create/settings-profile.md index 2497645bf00..d85b2aadeda 100644 --- a/docs/ru/sql-reference/statements/create/settings-profile.md +++ b/docs/ru/sql-reference/statements/create/settings-profile.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/settings-profile sidebar_position: 43 sidebar_label: "Профиль настроек" --- diff --git a/docs/ru/sql-reference/statements/create/table.md b/docs/ru/sql-reference/statements/create/table.md index c17f0af1b2c..c7ee2a62d98 100644 --- a/docs/ru/sql-reference/statements/create/table.md +++ b/docs/ru/sql-reference/statements/create/table.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/table sidebar_position: 36 sidebar_label: "Таблица" --- diff --git a/docs/ru/sql-reference/statements/create/user.md b/docs/ru/sql-reference/statements/create/user.md index 683e56a61b3..c0c50174d78 100644 --- a/docs/ru/sql-reference/statements/create/user.md +++ b/docs/ru/sql-reference/statements/create/user.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/user sidebar_position: 39 sidebar_label: "Пользователь" --- diff --git a/docs/ru/sql-reference/statements/create/view.md b/docs/ru/sql-reference/statements/create/view.md index a0193cea21c..a317bfdb596 100644 --- a/docs/ru/sql-reference/statements/create/view.md +++ b/docs/ru/sql-reference/statements/create/view.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/create/view sidebar_position: 37 sidebar_label: "Представление" --- diff --git a/docs/ru/sql-reference/statements/describe-table.md b/docs/ru/sql-reference/statements/describe-table.md index 3e93a0d0f7a..73b4278352a 100644 --- a/docs/ru/sql-reference/statements/describe-table.md +++ b/docs/ru/sql-reference/statements/describe-table.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/describe-table sidebar_position: 42 sidebar_label: DESCRIBE --- diff --git a/docs/ru/sql-reference/statements/detach.md b/docs/ru/sql-reference/statements/detach.md index d09ace0b20e..221a8a17174 100644 --- a/docs/ru/sql-reference/statements/detach.md +++ b/docs/ru/sql-reference/statements/detach.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/detach sidebar_position: 43 sidebar_label: DETACH --- diff --git a/docs/ru/sql-reference/statements/drop.md b/docs/ru/sql-reference/statements/drop.md index 645475e546c..7c40f525f0b 100644 --- a/docs/ru/sql-reference/statements/drop.md +++ b/docs/ru/sql-reference/statements/drop.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/drop sidebar_position: 44 sidebar_label: DROP --- diff --git a/docs/ru/sql-reference/statements/exchange.md b/docs/ru/sql-reference/statements/exchange.md index c8c97a1daa4..2c872791afd 100644 --- a/docs/ru/sql-reference/statements/exchange.md +++ b/docs/ru/sql-reference/statements/exchange.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/exchange 
sidebar_position: 49 sidebar_label: EXCHANGE --- diff --git a/docs/ru/sql-reference/statements/exists.md b/docs/ru/sql-reference/statements/exists.md index 18fc26b7331..3bec69c351a 100644 --- a/docs/ru/sql-reference/statements/exists.md +++ b/docs/ru/sql-reference/statements/exists.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/exists sidebar_position: 45 sidebar_label: EXISTS --- diff --git a/docs/ru/sql-reference/statements/explain.md b/docs/ru/sql-reference/statements/explain.md index 0a6e6b03f11..0e9edd4223e 100644 --- a/docs/ru/sql-reference/statements/explain.md +++ b/docs/ru/sql-reference/statements/explain.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/explain sidebar_position: 39 sidebar_label: EXPLAIN --- diff --git a/docs/ru/sql-reference/statements/grant.md b/docs/ru/sql-reference/statements/grant.md index f1777ae6a04..79e3006d4ad 100644 --- a/docs/ru/sql-reference/statements/grant.md +++ b/docs/ru/sql-reference/statements/grant.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/grant sidebar_position: 38 sidebar_label: GRANT --- diff --git a/docs/ru/sql-reference/statements/index.md b/docs/ru/sql-reference/statements/index.md index a3b5db11745..f6df8695f69 100644 --- a/docs/ru/sql-reference/statements/index.md +++ b/docs/ru/sql-reference/statements/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/ sidebar_label: "Выражения" sidebar_position: 31 --- diff --git a/docs/ru/sql-reference/statements/insert-into.md b/docs/ru/sql-reference/statements/insert-into.md index 936580aaead..573b8d39926 100644 --- a/docs/ru/sql-reference/statements/insert-into.md +++ b/docs/ru/sql-reference/statements/insert-into.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/insert-into sidebar_position: 33 sidebar_label: INSERT INTO --- diff --git a/docs/ru/sql-reference/statements/kill.md b/docs/ru/sql-reference/statements/kill.md index e536ee40c1f..6af6a75c613 100644 --- a/docs/ru/sql-reference/statements/kill.md +++ b/docs/ru/sql-reference/statements/kill.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/kill sidebar_position: 46 sidebar_label: KILL --- diff --git a/docs/ru/sql-reference/statements/misc.md b/docs/ru/sql-reference/statements/misc.md index cee3974822c..437215f20ce 100644 --- a/docs/ru/sql-reference/statements/misc.md +++ b/docs/ru/sql-reference/statements/misc.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/misc sidebar_position: 41 --- diff --git a/docs/ru/sql-reference/statements/optimize.md b/docs/ru/sql-reference/statements/optimize.md index 61480a0c1ab..0ded60b8796 100644 --- a/docs/ru/sql-reference/statements/optimize.md +++ b/docs/ru/sql-reference/statements/optimize.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/optimize sidebar_position: 47 sidebar_label: OPTIMIZE --- diff --git a/docs/ru/sql-reference/statements/rename.md b/docs/ru/sql-reference/statements/rename.md index 391b96e073b..6575dae9642 100644 --- a/docs/ru/sql-reference/statements/rename.md +++ b/docs/ru/sql-reference/statements/rename.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/rename sidebar_position: 48 sidebar_label: RENAME --- diff --git a/docs/ru/sql-reference/statements/revoke.md b/docs/ru/sql-reference/statements/revoke.md index 15064a733eb..a5e7dbb7a14 100644 --- a/docs/ru/sql-reference/statements/revoke.md +++ b/docs/ru/sql-reference/statements/revoke.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/revoke sidebar_position: 39 sidebar_label: REVOKE --- diff --git 
a/docs/ru/sql-reference/statements/select/all.md b/docs/ru/sql-reference/statements/select/all.md index d0bf6de8ba9..8e0a289e062 100644 --- a/docs/ru/sql-reference/statements/select/all.md +++ b/docs/ru/sql-reference/statements/select/all.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/all sidebar_label: ALL --- diff --git a/docs/ru/sql-reference/statements/select/array-join.md b/docs/ru/sql-reference/statements/select/array-join.md index db46c53720e..9d2dbf54a2b 100644 --- a/docs/ru/sql-reference/statements/select/array-join.md +++ b/docs/ru/sql-reference/statements/select/array-join.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/array-join sidebar_label: ARRAY JOIN --- diff --git a/docs/ru/sql-reference/statements/select/distinct.md b/docs/ru/sql-reference/statements/select/distinct.md index 9cf3f842a07..125231d733d 100644 --- a/docs/ru/sql-reference/statements/select/distinct.md +++ b/docs/ru/sql-reference/statements/select/distinct.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/distinct sidebar_label: DISTINCT --- diff --git a/docs/ru/sql-reference/statements/select/except.md b/docs/ru/sql-reference/statements/select/except.md index 5f6c7bc546c..1332869b73b 100644 --- a/docs/ru/sql-reference/statements/select/except.md +++ b/docs/ru/sql-reference/statements/select/except.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/except sidebar_label: EXCEPT --- diff --git a/docs/ru/sql-reference/statements/select/format.md b/docs/ru/sql-reference/statements/select/format.md index b2982ebbce2..4facafe387e 100644 --- a/docs/ru/sql-reference/statements/select/format.md +++ b/docs/ru/sql-reference/statements/select/format.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/format sidebar_label: FORMAT --- diff --git a/docs/ru/sql-reference/statements/select/from.md b/docs/ru/sql-reference/statements/select/from.md index 2c7214d3cbe..a7294d772d0 100644 --- a/docs/ru/sql-reference/statements/select/from.md +++ b/docs/ru/sql-reference/statements/select/from.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/from sidebar_label: FROM --- diff --git a/docs/ru/sql-reference/statements/select/group-by.md b/docs/ru/sql-reference/statements/select/group-by.md index 01df1b969bf..450543cb7a2 100644 --- a/docs/ru/sql-reference/statements/select/group-by.md +++ b/docs/ru/sql-reference/statements/select/group-by.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/group-by sidebar_label: GROUP BY --- diff --git a/docs/ru/sql-reference/statements/select/having.md b/docs/ru/sql-reference/statements/select/having.md index 245c65da610..81efcfa60a4 100644 --- a/docs/ru/sql-reference/statements/select/having.md +++ b/docs/ru/sql-reference/statements/select/having.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/having sidebar_label: HAVING --- diff --git a/docs/ru/sql-reference/statements/select/index.md b/docs/ru/sql-reference/statements/select/index.md index 1edf93faeaa..4479e24000b 100644 --- a/docs/ru/sql-reference/statements/select/index.md +++ b/docs/ru/sql-reference/statements/select/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/ title: "Синтаксис запросов SELECT" sidebar_label: SELECT sidebar_position: 32 diff --git a/docs/ru/sql-reference/statements/select/intersect.md b/docs/ru/sql-reference/statements/select/intersect.md index fe55ab931bb..4f526a551ad 100644 --- a/docs/ru/sql-reference/statements/select/intersect.md +++ 
b/docs/ru/sql-reference/statements/select/intersect.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/intersect sidebar_label: INTERSECT --- diff --git a/docs/ru/sql-reference/statements/select/into-outfile.md b/docs/ru/sql-reference/statements/select/into-outfile.md index 81d48badbe6..3f3ae1a0e2d 100644 --- a/docs/ru/sql-reference/statements/select/into-outfile.md +++ b/docs/ru/sql-reference/statements/select/into-outfile.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/into-outfile sidebar_label: INTO OUTFILE --- diff --git a/docs/ru/sql-reference/statements/select/join.md b/docs/ru/sql-reference/statements/select/join.md index 03ea3a5309b..6853a8c8c90 100644 --- a/docs/ru/sql-reference/statements/select/join.md +++ b/docs/ru/sql-reference/statements/select/join.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/join sidebar_label: JOIN --- diff --git a/docs/ru/sql-reference/statements/select/limit-by.md b/docs/ru/sql-reference/statements/select/limit-by.md index 3564287d19d..c7b9731f5d3 100644 --- a/docs/ru/sql-reference/statements/select/limit-by.md +++ b/docs/ru/sql-reference/statements/select/limit-by.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/limit-by sidebar_label: LIMIT BY --- diff --git a/docs/ru/sql-reference/statements/select/limit.md b/docs/ru/sql-reference/statements/select/limit.md index 35348209ec8..73daa76fafa 100644 --- a/docs/ru/sql-reference/statements/select/limit.md +++ b/docs/ru/sql-reference/statements/select/limit.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/limit sidebar_label: LIMIT --- diff --git a/docs/ru/sql-reference/statements/select/offset.md b/docs/ru/sql-reference/statements/select/offset.md index 5261d8e9869..fac995b9a8e 100644 --- a/docs/ru/sql-reference/statements/select/offset.md +++ b/docs/ru/sql-reference/statements/select/offset.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/offset sidebar_label: OFFSET --- diff --git a/docs/ru/sql-reference/statements/select/order-by.md b/docs/ru/sql-reference/statements/select/order-by.md index efe1b6f0b22..40efb862250 100644 --- a/docs/ru/sql-reference/statements/select/order-by.md +++ b/docs/ru/sql-reference/statements/select/order-by.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/order-by sidebar_label: ORDER BY --- diff --git a/docs/ru/sql-reference/statements/select/prewhere.md b/docs/ru/sql-reference/statements/select/prewhere.md index 26950cbe596..e37aec814ec 100644 --- a/docs/ru/sql-reference/statements/select/prewhere.md +++ b/docs/ru/sql-reference/statements/select/prewhere.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/prewhere sidebar_label: PREWHERE --- diff --git a/docs/ru/sql-reference/statements/select/sample.md b/docs/ru/sql-reference/statements/select/sample.md index f6e361bb744..e63479d6c27 100644 --- a/docs/ru/sql-reference/statements/select/sample.md +++ b/docs/ru/sql-reference/statements/select/sample.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/sample sidebar_label: SAMPLE --- diff --git a/docs/ru/sql-reference/statements/select/union.md b/docs/ru/sql-reference/statements/select/union.md index c3719c89214..4dad63c2c7c 100644 --- a/docs/ru/sql-reference/statements/select/union.md +++ b/docs/ru/sql-reference/statements/select/union.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/union sidebar_label: UNION --- diff --git a/docs/ru/sql-reference/statements/select/where.md 
b/docs/ru/sql-reference/statements/select/where.md index d457ca2aed8..f63ea121d4a 100644 --- a/docs/ru/sql-reference/statements/select/where.md +++ b/docs/ru/sql-reference/statements/select/where.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/where sidebar_label: WHERE --- diff --git a/docs/ru/sql-reference/statements/select/with.md b/docs/ru/sql-reference/statements/select/with.md index 3784c43fde3..e51c0ed3c47 100644 --- a/docs/ru/sql-reference/statements/select/with.md +++ b/docs/ru/sql-reference/statements/select/with.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/select/with sidebar_label: WITH --- diff --git a/docs/ru/sql-reference/statements/set-role.md b/docs/ru/sql-reference/statements/set-role.md index cd62206df13..7aeedfa5c68 100644 --- a/docs/ru/sql-reference/statements/set-role.md +++ b/docs/ru/sql-reference/statements/set-role.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/set-role sidebar_position: 50 sidebar_label: SET ROLE --- diff --git a/docs/ru/sql-reference/statements/set.md b/docs/ru/sql-reference/statements/set.md index 3b31e5ed503..c9aac8438b4 100644 --- a/docs/ru/sql-reference/statements/set.md +++ b/docs/ru/sql-reference/statements/set.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/set sidebar_position: 49 sidebar_label: SET --- diff --git a/docs/ru/sql-reference/statements/show.md b/docs/ru/sql-reference/statements/show.md index 3e7560b0882..1d072c9d5de 100644 --- a/docs/ru/sql-reference/statements/show.md +++ b/docs/ru/sql-reference/statements/show.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/show sidebar_position: 37 sidebar_label: SHOW --- diff --git a/docs/ru/sql-reference/statements/system.md b/docs/ru/sql-reference/statements/system.md index 2f5c9f59e22..c1dc03a63d1 100644 --- a/docs/ru/sql-reference/statements/system.md +++ b/docs/ru/sql-reference/statements/system.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/system sidebar_position: 36 sidebar_label: SYSTEM --- diff --git a/docs/ru/sql-reference/statements/truncate.md b/docs/ru/sql-reference/statements/truncate.md index cac6c261f21..7f588349184 100644 --- a/docs/ru/sql-reference/statements/truncate.md +++ b/docs/ru/sql-reference/statements/truncate.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/truncate sidebar_position: 51 sidebar_label: TRUNCATE --- diff --git a/docs/ru/sql-reference/statements/use.md b/docs/ru/sql-reference/statements/use.md index f4715b6c7a2..399439e2c88 100644 --- a/docs/ru/sql-reference/statements/use.md +++ b/docs/ru/sql-reference/statements/use.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/use sidebar_position: 52 sidebar_label: USE --- diff --git a/docs/ru/sql-reference/statements/watch.md b/docs/ru/sql-reference/statements/watch.md index f925e25b0d5..3a4bfb7dd00 100644 --- a/docs/ru/sql-reference/statements/watch.md +++ b/docs/ru/sql-reference/statements/watch.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/statements/watch sidebar_position: 53 sidebar_label: WATCH --- diff --git a/docs/ru/sql-reference/syntax.md b/docs/ru/sql-reference/syntax.md index 17eb0165a4e..cf4b0f3714f 100644 --- a/docs/ru/sql-reference/syntax.md +++ b/docs/ru/sql-reference/syntax.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/syntax sidebar_position: 31 sidebar_label: "Синтаксис" --- diff --git a/docs/ru/sql-reference/table-functions/cluster.md b/docs/ru/sql-reference/table-functions/cluster.md index 1ba729696a6..a831c280ec4 100644 --- a/docs/ru/sql-reference/table-functions/cluster.md +++ 
b/docs/ru/sql-reference/table-functions/cluster.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/cluster sidebar_position: 50 sidebar_label: cluster --- diff --git a/docs/ru/sql-reference/table-functions/dictionary.md b/docs/ru/sql-reference/table-functions/dictionary.md index d7e714538d2..72971f07ad3 100644 --- a/docs/ru/sql-reference/table-functions/dictionary.md +++ b/docs/ru/sql-reference/table-functions/dictionary.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/dictionary sidebar_position: 54 sidebar_label: dictionary --- diff --git a/docs/ru/sql-reference/table-functions/file.md b/docs/ru/sql-reference/table-functions/file.md index 318036ea1b5..1f262c9403a 100644 --- a/docs/ru/sql-reference/table-functions/file.md +++ b/docs/ru/sql-reference/table-functions/file.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/file sidebar_position: 37 sidebar_label: file --- diff --git a/docs/ru/sql-reference/table-functions/generate.md b/docs/ru/sql-reference/table-functions/generate.md index 266ca1bb8a6..3ebe4dcb988 100644 --- a/docs/ru/sql-reference/table-functions/generate.md +++ b/docs/ru/sql-reference/table-functions/generate.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/generate sidebar_position: 47 sidebar_label: generateRandom --- diff --git a/docs/ru/sql-reference/table-functions/hdfs.md b/docs/ru/sql-reference/table-functions/hdfs.md index 0048624201c..a1a65fa1a28 100644 --- a/docs/ru/sql-reference/table-functions/hdfs.md +++ b/docs/ru/sql-reference/table-functions/hdfs.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/hdfs sidebar_position: 45 sidebar_label: hdfs --- diff --git a/docs/ru/sql-reference/table-functions/index.md b/docs/ru/sql-reference/table-functions/index.md index c1ac3587af8..2ff033b57c2 100644 --- a/docs/ru/sql-reference/table-functions/index.md +++ b/docs/ru/sql-reference/table-functions/index.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/ sidebar_label: "Табличные функции" sidebar_position: 34 --- diff --git a/docs/ru/sql-reference/table-functions/input.md b/docs/ru/sql-reference/table-functions/input.md index 3520211ed96..224030fdbae 100644 --- a/docs/ru/sql-reference/table-functions/input.md +++ b/docs/ru/sql-reference/table-functions/input.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/input sidebar_position: 46 sidebar_label: input --- diff --git a/docs/ru/sql-reference/table-functions/jdbc.md b/docs/ru/sql-reference/table-functions/jdbc.md index e450878dc35..f583e694220 100644 --- a/docs/ru/sql-reference/table-functions/jdbc.md +++ b/docs/ru/sql-reference/table-functions/jdbc.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/jdbc sidebar_position: 43 sidebar_label: jdbc --- diff --git a/docs/ru/sql-reference/table-functions/merge.md b/docs/ru/sql-reference/table-functions/merge.md index 0c8a3c24f8a..b931a99a90d 100644 --- a/docs/ru/sql-reference/table-functions/merge.md +++ b/docs/ru/sql-reference/table-functions/merge.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/merge sidebar_position: 38 sidebar_label: merge --- diff --git a/docs/ru/sql-reference/table-functions/mysql.md b/docs/ru/sql-reference/table-functions/mysql.md index e238839b937..dee2b92b2fc 100644 --- a/docs/ru/sql-reference/table-functions/mysql.md +++ b/docs/ru/sql-reference/table-functions/mysql.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/mysql sidebar_position: 42 sidebar_label: mysql --- diff --git 
a/docs/ru/sql-reference/table-functions/null.md b/docs/ru/sql-reference/table-functions/null.md index 94d1785f77a..b33a9bc5a06 100644 --- a/docs/ru/sql-reference/table-functions/null.md +++ b/docs/ru/sql-reference/table-functions/null.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/null sidebar_position: 53 sidebar_label: null функция --- diff --git a/docs/ru/sql-reference/table-functions/numbers.md b/docs/ru/sql-reference/table-functions/numbers.md index 99c46f382fa..cfd3f63f1ba 100644 --- a/docs/ru/sql-reference/table-functions/numbers.md +++ b/docs/ru/sql-reference/table-functions/numbers.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/numbers sidebar_position: 39 sidebar_label: numbers --- diff --git a/docs/ru/sql-reference/table-functions/odbc.md b/docs/ru/sql-reference/table-functions/odbc.md index 7dc1b5e5360..3a0ea3ffbca 100644 --- a/docs/ru/sql-reference/table-functions/odbc.md +++ b/docs/ru/sql-reference/table-functions/odbc.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/odbc sidebar_position: 44 sidebar_label: odbc --- diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md index 2e1e5314f91..abac2c287dd 100644 --- a/docs/ru/sql-reference/table-functions/postgresql.md +++ b/docs/ru/sql-reference/table-functions/postgresql.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/postgresql sidebar_position: 42 sidebar_label: postgresql --- diff --git a/docs/ru/sql-reference/table-functions/remote.md b/docs/ru/sql-reference/table-functions/remote.md index fb78db83eed..e65a339e784 100644 --- a/docs/ru/sql-reference/table-functions/remote.md +++ b/docs/ru/sql-reference/table-functions/remote.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/remote sidebar_position: 40 sidebar_label: remote --- diff --git a/docs/ru/sql-reference/table-functions/s3.md b/docs/ru/sql-reference/table-functions/s3.md index 8bfbeb2eca4..ae0419a4b84 100644 --- a/docs/ru/sql-reference/table-functions/s3.md +++ b/docs/ru/sql-reference/table-functions/s3.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/s3 sidebar_position: 45 sidebar_label: s3 --- diff --git a/docs/ru/sql-reference/table-functions/s3Cluster.md b/docs/ru/sql-reference/table-functions/s3Cluster.md index e42ab1a5168..e6b317253c0 100644 --- a/docs/ru/sql-reference/table-functions/s3Cluster.md +++ b/docs/ru/sql-reference/table-functions/s3Cluster.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/s3Cluster sidebar_position: 55 sidebar_label: s3Cluster --- diff --git a/docs/ru/sql-reference/table-functions/sqlite.md b/docs/ru/sql-reference/table-functions/sqlite.md index 268f2a906b4..aa5ae1e390c 100644 --- a/docs/ru/sql-reference/table-functions/sqlite.md +++ b/docs/ru/sql-reference/table-functions/sqlite.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/sqlite sidebar_position: 55 sidebar_label: sqlite --- diff --git a/docs/ru/sql-reference/table-functions/url.md b/docs/ru/sql-reference/table-functions/url.md index f077c7c64ef..d4fb11b0de7 100644 --- a/docs/ru/sql-reference/table-functions/url.md +++ b/docs/ru/sql-reference/table-functions/url.md @@ -1,4 +1,5 @@ --- +slug: /ru/sql-reference/table-functions/url sidebar_position: 41 sidebar_label: url --- diff --git a/docs/ru/sql-reference/table-functions/view.md b/docs/ru/sql-reference/table-functions/view.md index 498e76917ab..f02a03a8569 100644 --- a/docs/ru/sql-reference/table-functions/view.md +++ 
b/docs/ru/sql-reference/table-functions/view.md @@ -1,3 +1,6 @@ +--- +slug: /ru/sql-reference/table-functions/view +--- ## view {#view} Преобразовывает подзапрос в таблицу. Функция реализовывает представления (смотрите [CREATE VIEW](https://clickhouse.com/docs/ru/sql-reference/statements/create/view/#create-view)). Результирующая таблица не хранит данные, а только сохраняет указанный запрос `SELECT`. При чтении из таблицы, ClickHouse выполняет запрос и удаляет все ненужные столбцы из результата. diff --git a/docs/ru/sql-reference/window-functions/index.md b/docs/ru/sql-reference/window-functions/index.md deleted file mode 120000 index 206505bf0eb..00000000000 --- a/docs/ru/sql-reference/window-functions/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql-reference/window-functions/index.md \ No newline at end of file diff --git a/docs/ru/sql-reference/window-functions/index.mdx b/docs/ru/sql-reference/window-functions/index.mdx new file mode 100644 index 00000000000..7f6025330f5 --- /dev/null +++ b/docs/ru/sql-reference/window-functions/index.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/sql-reference/window-functions/ +sidebar_position: 62 +sidebar_label: Window Functions +title: "Window Functions" +--- + +import Content from '@site/docs/en/sql-reference/window-functions/index.md'; + +<Content /> diff --git a/docs/ru/whats-new/_category_.yml b/docs/ru/whats-new/_category_.yml new file mode 100644 index 00000000000..570ed7642a2 --- /dev/null +++ b/docs/ru/whats-new/_category_.yml @@ -0,0 +1,8 @@ +position: 70 +label: "What's New" +collapsible: true +collapsed: true +link: + type: generated-index + title: "What's New" + slug: /ru/whats-new/ diff --git a/docs/ru/whats-new/changelog/2017.mdx b/docs/ru/whats-new/changelog/2017.mdx new file mode 100644 index 00000000000..9d1c7220f3d --- /dev/null +++ b/docs/ru/whats-new/changelog/2017.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/whats-new/changelog/2017 +sidebar_position: 6 +sidebar_label: 2017 +title: 2017 Changelog +--- + +import Changelog from '@site/docs/en/whats-new/changelog/2017.md'; + +<Changelog /> diff --git a/docs/ru/whats-new/changelog/2018.mdx b/docs/ru/whats-new/changelog/2018.mdx new file mode 100644 index 00000000000..fb804afa293 --- /dev/null +++ b/docs/ru/whats-new/changelog/2018.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/whats-new/changelog/2018 +sidebar_position: 5 +sidebar_label: 2018 +title: 2018 Changelog +--- + +import Changelog from '@site/docs/en/whats-new/changelog/2018.md'; + +<Changelog /> diff --git a/docs/ru/whats-new/changelog/2019.mdx b/docs/ru/whats-new/changelog/2019.mdx new file mode 100644 index 00000000000..caea40f6077 --- /dev/null +++ b/docs/ru/whats-new/changelog/2019.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/whats-new/changelog/2019 +sidebar_position: 4 +sidebar_label: 2019 +title: 2019 Changelog +--- + +import Changelog from '@site/docs/en/whats-new/changelog/2019.md'; + +<Changelog /> diff --git a/docs/ru/whats-new/changelog/2020.mdx b/docs/ru/whats-new/changelog/2020.mdx new file mode 100644 index 00000000000..51943785a4a --- /dev/null +++ b/docs/ru/whats-new/changelog/2020.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/whats-new/changelog/2020 +sidebar_position: 3 +sidebar_label: 2020 +title: 2020 Changelog +--- + +import Changelog from '@site/docs/en/whats-new/changelog/2020.md'; + +<Changelog /> diff --git a/docs/ru/whats-new/changelog/2021.mdx b/docs/ru/whats-new/changelog/2021.mdx new file mode 100644 index 00000000000..d517bad19e6 --- /dev/null +++ b/docs/ru/whats-new/changelog/2021.mdx @@ -0,0 +1,10 @@ +--- +slug: /ru/whats-new/changelog/2021 +sidebar_position: 2 +sidebar_label: 2021
+title: 2021 Changelog +--- + +import Changelog from '@site/docs/en/whats-new/changelog/2021.md'; + +<Changelog /> diff --git a/docs/ru/whats-new/changelog/_category_.yml b/docs/ru/whats-new/changelog/_category_.yml new file mode 100644 index 00000000000..7a496eace66 --- /dev/null +++ b/docs/ru/whats-new/changelog/_category_.yml @@ -0,0 +1,6 @@ +label: 'Changelog' +collapsible: true +collapsed: true +link: + type: doc + id: ru/whats-new/changelog/index diff --git a/docs/ru/whats-new/changelog/index.mdx b/docs/ru/whats-new/changelog/index.mdx new file mode 100644 index 00000000000..311be3be313 --- /dev/null +++ b/docs/ru/whats-new/changelog/index.mdx @@ -0,0 +1,10 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +title: 2022 Changelog +slug: /ru/whats-new/changelog/index +--- + +import Changelog from '@site/docs/en/whats-new/changelog/index.md'; + +<Changelog /> diff --git a/docs/ru/whats-new/index.md b/docs/ru/whats-new/index.md deleted file mode 100644 index 89b4ccc7f64..00000000000 --- a/docs/ru/whats-new/index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -sidebar_label: "Что нового?" -sidebar_position: 82 ---- - -# Что нового в ClickHouse? - -Планы развития вкратце изложены [здесь](https://github.com/ClickHouse/ClickHouse/issues/32513), а новости по предыдущим релизам подробно описаны в [журнале изменений](./changelog/). diff --git a/docs/ru/whats-new/roadmap.mdx b/docs/ru/whats-new/roadmap.mdx new file mode 100644 index 00000000000..a5f3538caf1 --- /dev/null +++ b/docs/ru/whats-new/roadmap.mdx @@ -0,0 +1,9 @@ +--- +slug: /ru/whats-new/roadmap +sidebar_label: Roadmap +title: Roadmap +--- + +import Roadmap from '@site/docs/en/whats-new/roadmap.md'; + +<Roadmap /> diff --git a/docs/ru/whats-new/security-changelog.md b/docs/ru/whats-new/security-changelog.md index abfdd48ea09..2b435890934 100644 --- a/docs/ru/whats-new/security-changelog.md +++ b/docs/ru/whats-new/security-changelog.md @@ -1,4 +1,5 @@ --- +slug: /ru/whats-new/security-changelog sidebar_position: 76 sidebar_label: Security Changelog --- diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md index e1add299fde..7afcc07c6fb 100644 --- a/docs/zh/changelog/index.md +++ b/docs/zh/changelog/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/changelog/ machine_translated: true machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 sidebar_label: "\u53D8\u66F4\u65E5\u5FD7" diff --git a/docs/zh/commercial/cloud.md b/docs/zh/commercial/cloud.md index 7f039e403bb..59dba717ee6 100644 --- a/docs/zh/commercial/cloud.md +++ b/docs/zh/commercial/cloud.md @@ -1,4 +1,5 @@ --- +slug: /zh/commercial/cloud sidebar_position: 1 sidebar_label: 云 --- diff --git a/docs/zh/commercial/index.md b/docs/zh/commercial/index.md index f6121d54d84..f9c10bb26e8 100644 --- a/docs/zh/commercial/index.md +++ b/docs/zh/commercial/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/commercial/ sidebar_label: 商业支持 sidebar_position: 70 --- diff --git a/docs/zh/commercial/support.md b/docs/zh/commercial/support.md index 44c1e76d775..8a17f0e4c51 100644 --- a/docs/zh/commercial/support.md +++ b/docs/zh/commercial/support.md @@ -1,4 +1,5 @@ --- +slug: /zh/commercial/support sidebar_position: 3 sidebar_label: 支持 --- diff --git a/docs/zh/development/adding_test_queries.md b/docs/zh/development/adding_test_queries.md index a494bb5eb44..e39f1eccb87 100644 --- a/docs/zh/development/adding_test_queries.md +++ b/docs/zh/development/adding_test_queries.md @@ -1,3 +1,6 @@ +--- +slug: /zh/development/adding_test_queries +--- # 如何将测试查询添加到 ClickHouse CI ClickHouse有数百个(甚至数千个)功能。 每个提交都由包含数千个测试用例的一组复杂测试进行检查。 diff --git
a/docs/zh/development/architecture.md b/docs/zh/development/architecture.md index dfc66b36730..06fb1a7cd52 100644 --- a/docs/zh/development/architecture.md +++ b/docs/zh/development/architecture.md @@ -1,3 +1,6 @@ +--- +slug: /zh/development/architecture +--- # ClickHouse 架构概述 {#clickhouse-jia-gou-gai-shu} ClickHouse 是一个真正的列式数据库管理系统(DBMS)。在 ClickHouse 中,数据始终是按列存储的,包括矢量(向量或列块)执行的过程。只要有可能,操作都是基于矢量进行分派的,而不是单个的值,这被称为«矢量化查询执行»,它有利于降低实际的数据处理开销。 diff --git a/docs/zh/development/browse-code.md b/docs/zh/development/browse-code.md index 7610589e171..16382a94ed5 100644 --- a/docs/zh/development/browse-code.md +++ b/docs/zh/development/browse-code.md @@ -1,4 +1,5 @@ --- +slug: /zh/development/browse-code sidebar_position: 63 sidebar_label: "\u6D4F\u89C8\u6E90\u4EE3\u7801" --- diff --git a/docs/zh/development/build-cross-arm.md b/docs/zh/development/build-cross-arm.md index a618d31da17..035e8c0a353 100644 --- a/docs/zh/development/build-cross-arm.md +++ b/docs/zh/development/build-cross-arm.md @@ -1,4 +1,5 @@ --- +slug: /zh/development/build-cross-arm machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 67 diff --git a/docs/zh/development/build-cross-osx.md b/docs/zh/development/build-cross-osx.md index d9bc7666546..b9e229bd393 100644 --- a/docs/zh/development/build-cross-osx.md +++ b/docs/zh/development/build-cross-osx.md @@ -1,3 +1,6 @@ +--- +slug: /zh/development/build-cross-osx +--- # 如何在Linux中编译Mac OS X ClickHouse {#ru-he-zai-linuxzhong-bian-yi-mac-os-x-clickhouse} Linux机器也可以编译运行在OS X系统的`clickhouse`二进制包,这可以用于在Linux上跑持续集成测试。如果要在Mac OS X上直接构建ClickHouse,请参考另外一篇指南: https://clickhouse.com/docs/zh/development/build_osx/ diff --git a/docs/zh/development/build-osx.md b/docs/zh/development/build-osx.md index 6706e9b0e4d..0661586421d 100644 --- a/docs/zh/development/build-osx.md +++ b/docs/zh/development/build-osx.md @@ -1,3 +1,6 @@ +--- +slug: /zh/development/build-osx +--- # 在 Mac OS X 中编译 ClickHouse {#zai-mac-os-x-zhong-bian-yi-clickhouse} ClickHouse 支持在 Mac OS X 10.12 版本中编译。若您在用更早的操作系统版本,可以尝试在指令中使用 `Gentoo Prefix` 和 `clang sl`. diff --git a/docs/zh/development/build.md b/docs/zh/development/build.md index a46081d1d4b..ce2cd428f97 100644 --- a/docs/zh/development/build.md +++ b/docs/zh/development/build.md @@ -1,3 +1,6 @@ +--- +slug: /zh/development/build +--- # 如何构建 ClickHouse 发布包 {#ru-he-gou-jian-clickhouse-fa-bu-bao} ## 安装 Git 和 Pbuilder {#an-zhuang-git-he-pbuilder} diff --git a/docs/zh/development/continuous-integration.md b/docs/zh/development/continuous-integration.md index 836c4a35433..67b1eb228f1 100644 --- a/docs/zh/development/continuous-integration.md +++ b/docs/zh/development/continuous-integration.md @@ -1,4 +1,8 @@ +--- +slug: /zh/development/continuous-integration +--- # 持续集成检查 {#continuous-integration-checks} + 当你提交一个pull请求时, ClickHouse[持续集成(CI)系统](https://clickhouse.com/docs/en/development/tests/#test-automation)会对您的代码运行一些自动检查. 这在存储库维护者(来自ClickHouse团队的人)筛选了您的代码并将可测试标签添加到您的pull请求之后发生. 
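The `docs/ru` and `docs/zh` hunks throughout this patch are mechanical: each page either gains a new `---`/`slug: ...`/`---` front matter block, or has a `slug` key prepended to its existing front matter, with the slug mirroring the file's path under `docs/` (index pages map to their directory with a trailing slash). A minimal Python sketch of that transformation, for illustration only; the helper script and its names are hypothetical and not part of this pull request:

```python
#!/usr/bin/env python3
# Hypothetical helper, NOT part of this patch: reproduces the slug-adding
# pattern visible in the surrounding hunks.
import sys
from pathlib import Path

DOCS_ROOT = Path("docs")

def slug_for(path: Path) -> str:
    """docs/zh/development/tests.md -> /zh/development/tests"""
    rel = path.relative_to(DOCS_ROOT).with_suffix("")
    # Index pages map to their directory: docs/zh/engines/index.md -> /zh/engines/
    if rel.name == "index":
        return "/" + "/".join(rel.parts[:-1]) + "/"
    return "/" + "/".join(rel.parts)

def add_slug(path: Path) -> None:
    text = path.read_text(encoding="utf-8")
    slug_line = f"slug: {slug_for(path)}\n"
    if text.startswith("---\n"):
        # Existing front matter: insert slug as its first key.
        text = "---\n" + slug_line + text[len("---\n"):]
    else:
        # No front matter yet: prepend a minimal block.
        text = "---\n" + slug_line + "---\n" + text
    path.write_text(text, encoding="utf-8")

if __name__ == "__main__":
    for name in sys.argv[1:]:
        add_slug(Path(name))
```
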
diff --git a/docs/zh/development/contrib.md b/docs/zh/development/contrib.md index 8e8efc3c04e..0febafee05a 100644 --- a/docs/zh/development/contrib.md +++ b/docs/zh/development/contrib.md @@ -1,3 +1,6 @@ +--- +slug: /zh/development/contrib +--- # 使用的三方库 {#shi-yong-de-san-fang-ku} | 图书馆 | 许可 | diff --git a/docs/zh/development/developer-instruction.md b/docs/zh/development/developer-instruction.md index 77caad105c6..557bf33ee0c 100644 --- a/docs/zh/development/developer-instruction.md +++ b/docs/zh/development/developer-instruction.md @@ -1,4 +1,7 @@ -ClickHose支持Linux,FreeBSD 及 Mac OS X 系统。 +--- +slug: /zh/development/developer-instruction +--- +# ClickHose支持Linux,FreeBSD 及 Mac OS X 系统。 # Windows使用指引 {#windowsshi-yong-zhi-yin} diff --git a/docs/zh/development/index.md b/docs/zh/development/index.md index d14b0fc8fc7..79852be6c91 100644 --- a/docs/zh/development/index.md +++ b/docs/zh/development/index.md @@ -1,3 +1,6 @@ +--- +slug: /zh/development/ +--- # ClickHouse 开发 {#clickhouse-kai-fa} [来源文章](https://clickhouse.com/docs/en/development/) diff --git a/docs/zh/development/style.md b/docs/zh/development/style.md index 29287f31077..66b4ee8f9ab 100644 --- a/docs/zh/development/style.md +++ b/docs/zh/development/style.md @@ -1,3 +1,6 @@ +--- +slug: /zh/development/style +--- # 如何编写 C++ 代码 {#ru-he-bian-xie-c-dai-ma} ## 一般建议 {#yi-ban-jian-yi} diff --git a/docs/zh/development/tests.md b/docs/zh/development/tests.md index 43b37fffa9f..ca9300597c7 100644 --- a/docs/zh/development/tests.md +++ b/docs/zh/development/tests.md @@ -1,3 +1,6 @@ +--- +slug: /zh/development/tests +--- # ClickHouse 测试 {#clickhouse-testing} ## 功能测试 {#functional-tests} diff --git a/docs/zh/engines/database-engines/atomic.md b/docs/zh/engines/database-engines/atomic.md index 3267a19f73e..4b6394ca7e4 100644 --- a/docs/zh/engines/database-engines/atomic.md +++ b/docs/zh/engines/database-engines/atomic.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/database-engines/atomic sidebar_position: 32 sidebar_label: Atomic --- diff --git a/docs/zh/engines/database-engines/index.md b/docs/zh/engines/database-engines/index.md index 1cb846b5afd..56f0dce062f 100644 --- a/docs/zh/engines/database-engines/index.md +++ b/docs/zh/engines/database-engines/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/database-engines/ sidebar_label: 数据库引擎 sidebar_position: 27 --- diff --git a/docs/zh/engines/database-engines/lazy.md b/docs/zh/engines/database-engines/lazy.md index 8af6816ac34..41bf2b670d8 100644 --- a/docs/zh/engines/database-engines/lazy.md +++ b/docs/zh/engines/database-engines/lazy.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/database-engines/lazy sidebar_position: 31 sidebar_label: Lazy --- diff --git a/docs/zh/engines/database-engines/materialize-mysql.md b/docs/zh/engines/database-engines/materialize-mysql.md index 0fe0d9be836..42863fda15c 100644 --- a/docs/zh/engines/database-engines/materialize-mysql.md +++ b/docs/zh/engines/database-engines/materialize-mysql.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/database-engines/materialize-mysql sidebar_position: 29 sidebar_label: "[experimental] MaterializedMySQL" --- diff --git a/docs/zh/engines/database-engines/materialized-mysql.md b/docs/zh/engines/database-engines/materialized-mysql.md index 51733217ca6..3c69c5f7e37 100644 --- a/docs/zh/engines/database-engines/materialized-mysql.md +++ b/docs/zh/engines/database-engines/materialized-mysql.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/database-engines/materialized-mysql sidebar_position: 29 sidebar_label: MaterializedMySQL --- @@ -152,8 +153,8 @@ 
ClickHouse只有一个物理排序,由 `order by` 条件决定。要创建一 * 修改列类型。必须与原始类型兼容,否则复制将失败。例如,可以将`UInt32`列修改为`UInt64`,不能将 `String` 列修改为 `Array(String)`。 * 修改 [column TTL](../table-engines/mergetree-family/mergetree/#mergetree-column-ttl). - * 修改 [column compression codec](../../sql-reference/statements/create/table/#codecs). - * 增加 [ALIAS columns](../../sql-reference/statements/create/table/#alias). + * 修改 [column compression codec](../../sql-reference/statements/create/table.md/#codecs). + * 增加 [ALIAS columns](../../sql-reference/statements/create/table.md/#alias). * 增加 [skipping indexes](../table-engines/mergetree-family/mergetree/#table_engine-mergetree-data_skipping-indexes) * 增加 [projections](../table-engines/mergetree-family/mergetree/#projections). 请注意,当使用 `SELECT ... FINAL ` (MaterializedMySQL默认是这样做的) 时,预测优化是被禁用的,所以这里是受限的, `INDEX ... TYPE hypothesis `[在v21.12的博客文章中描述]](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/)可能在这种情况下更有用。 diff --git a/docs/zh/engines/database-engines/materialized-postgresql.md b/docs/zh/engines/database-engines/materialized-postgresql.md index 5be55f77308..4c51b0cc362 100644 --- a/docs/zh/engines/database-engines/materialized-postgresql.md +++ b/docs/zh/engines/database-engines/materialized-postgresql.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/database-engines/materialized-postgresql sidebar_position: 30 sidebar_label: MaterializedPostgreSQL --- diff --git a/docs/zh/engines/database-engines/mysql.md b/docs/zh/engines/database-engines/mysql.md index 97983f2915a..fc05fa8e3ee 100644 --- a/docs/zh/engines/database-engines/mysql.md +++ b/docs/zh/engines/database-engines/mysql.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/database-engines/mysql sidebar_position: 30 sidebar_label: MySQL --- diff --git a/docs/zh/engines/database-engines/postgresql.md b/docs/zh/engines/database-engines/postgresql.md index 0f2901e6389..ac93616c821 100644 --- a/docs/zh/engines/database-engines/postgresql.md +++ b/docs/zh/engines/database-engines/postgresql.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/database-engines/postgresql sidebar_position: 35 sidebar_label: PostgreSQL --- diff --git a/docs/zh/engines/database-engines/replicated.md b/docs/zh/engines/database-engines/replicated.md index bd5841491dd..5f209f04875 100644 --- a/docs/zh/engines/database-engines/replicated.md +++ b/docs/zh/engines/database-engines/replicated.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/database-engines/replicated +--- # [experimental] Replicated {#replicated} 该引擎基于[Atomic](../../engines/database-engines/atomic.md)引擎。它支持通过将DDL日志写入ZooKeeper并在给定数据库的所有副本上执行的元数据复制。 diff --git a/docs/zh/engines/database-engines/sqlite.md b/docs/zh/engines/database-engines/sqlite.md index 0742f37e8c0..6840da972ae 100644 --- a/docs/zh/engines/database-engines/sqlite.md +++ b/docs/zh/engines/database-engines/sqlite.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/database-engines/sqlite sidebar_position: 32 sidebar_label: SQLite --- diff --git a/docs/zh/engines/index.md b/docs/zh/engines/index.md index c1176e2a77d..4c9fa7cabff 100644 --- a/docs/zh/engines/index.md +++ b/docs/zh/engines/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/ toc_folder_title: "\u5f15\u64ce" toc_priority: 25 --- diff --git a/docs/zh/engines/table-engines/index.md b/docs/zh/engines/table-engines/index.md index f31fa257135..ebad09e494c 100644 --- a/docs/zh/engines/table-engines/index.md +++ b/docs/zh/engines/table-engines/index.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/ +--- # 表引擎 {#biao-yin-qing} 表引擎(即表的类型)决定了: diff --git 
a/docs/zh/engines/table-engines/integrations/embedded-rocksdb.md b/docs/zh/engines/table-engines/integrations/embedded-rocksdb.md index f97777ce02b..03b8ca9eb3a 100644 --- a/docs/zh/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/zh/engines/table-engines/integrations/embedded-rocksdb.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/integrations/embedded-rocksdb sidebar_position: 9 sidebar_label: EmbeddedRocksDB --- diff --git a/docs/zh/engines/table-engines/integrations/hdfs.md b/docs/zh/engines/table-engines/integrations/hdfs.md index 6f49b2ba0bb..c9c83391b2d 100644 --- a/docs/zh/engines/table-engines/integrations/hdfs.md +++ b/docs/zh/engines/table-engines/integrations/hdfs.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/integrations/hdfs sidebar_position: 36 sidebar_label: HDFS --- diff --git a/docs/zh/engines/table-engines/integrations/hive.md b/docs/zh/engines/table-engines/integrations/hive.md index adfeeb352a5..5b0034a228c 100644 --- a/docs/zh/engines/table-engines/integrations/hive.md +++ b/docs/zh/engines/table-engines/integrations/hive.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/integrations/hive sidebar_position: 4 sidebar_label: Hive --- diff --git a/docs/zh/engines/table-engines/integrations/index.md b/docs/zh/engines/table-engines/integrations/index.md index a26c6295548..d66d5267af7 100644 --- a/docs/zh/engines/table-engines/integrations/index.md +++ b/docs/zh/engines/table-engines/integrations/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/integrations/ sidebar_label: 集成的表引擎 sidebar_position: 30 --- diff --git a/docs/zh/engines/table-engines/integrations/jdbc.md b/docs/zh/engines/table-engines/integrations/jdbc.md index 3a8695b719f..a08f76b38df 100644 --- a/docs/zh/engines/table-engines/integrations/jdbc.md +++ b/docs/zh/engines/table-engines/integrations/jdbc.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/integrations/jdbc sidebar_position: 34 sidebar_label: JDBC表引擎 --- diff --git a/docs/zh/engines/table-engines/integrations/kafka.md b/docs/zh/engines/table-engines/integrations/kafka.md index 0a8fbdbf5ae..707ee962ace 100644 --- a/docs/zh/engines/table-engines/integrations/kafka.md +++ b/docs/zh/engines/table-engines/integrations/kafka.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/integrations/kafka +--- # Kafka {#kafka} 此引擎与 [Apache Kafka](http://kafka.apache.org/) 结合使用。 diff --git a/docs/zh/engines/table-engines/integrations/mongodb.md b/docs/zh/engines/table-engines/integrations/mongodb.md index d0de5e694b7..b71640c688d 100644 --- a/docs/zh/engines/table-engines/integrations/mongodb.md +++ b/docs/zh/engines/table-engines/integrations/mongodb.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/integrations/mongodb sidebar_position: 5 sidebar_label: MongoDB --- diff --git a/docs/zh/engines/table-engines/integrations/mysql.md b/docs/zh/engines/table-engines/integrations/mysql.md index 39a7eabda35..62de40d6e2c 100644 --- a/docs/zh/engines/table-engines/integrations/mysql.md +++ b/docs/zh/engines/table-engines/integrations/mysql.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/integrations/mysql +--- # MySQL {#mysql} MySQL 引擎可以对存储在远程 MySQL 服务器上的数据执行 `SELECT` 查询。 diff --git a/docs/zh/engines/table-engines/integrations/odbc.md b/docs/zh/engines/table-engines/integrations/odbc.md index 857f4c0c669..3c58f7c3875 100644 --- a/docs/zh/engines/table-engines/integrations/odbc.md +++ b/docs/zh/engines/table-engines/integrations/odbc.md @@ -1,4 +1,5 @@ --- +slug: 
/zh/engines/table-engines/integrations/odbc sidebar_position: 35 sidebar_label: ODBC --- diff --git a/docs/zh/engines/table-engines/integrations/postgresql.md b/docs/zh/engines/table-engines/integrations/postgresql.md index d07e70d3f0d..fc73f9d5dc9 100644 --- a/docs/zh/engines/table-engines/integrations/postgresql.md +++ b/docs/zh/engines/table-engines/integrations/postgresql.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/integrations/postgresql sidebar_position: 11 sidebar_label: PostgreSQL --- diff --git a/docs/zh/engines/table-engines/integrations/rabbitmq.md b/docs/zh/engines/table-engines/integrations/rabbitmq.md index f62f347ed51..730c4f42c13 100644 --- a/docs/zh/engines/table-engines/integrations/rabbitmq.md +++ b/docs/zh/engines/table-engines/integrations/rabbitmq.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/integrations/rabbitmq sidebar_position: 10 sidebar_label: RabbitMQ --- diff --git a/docs/zh/engines/table-engines/integrations/s3.md b/docs/zh/engines/table-engines/integrations/s3.md index c9bf2e7be76..f2585decabf 100644 --- a/docs/zh/engines/table-engines/integrations/s3.md +++ b/docs/zh/engines/table-engines/integrations/s3.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/integrations/s3 sidebar_position: 7 sidebar_label: S3 --- diff --git a/docs/zh/engines/table-engines/integrations/sqlite.md b/docs/zh/engines/table-engines/integrations/sqlite.md index 8da59ca31d7..0e869f8ec0d 100644 --- a/docs/zh/engines/table-engines/integrations/sqlite.md +++ b/docs/zh/engines/table-engines/integrations/sqlite.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/integrations/sqlite sidebar_position: 7 sidebar_label: SQLite --- diff --git a/docs/zh/engines/table-engines/log-family/index.md b/docs/zh/engines/table-engines/log-family/index.md index e30ae49e629..56776522445 100644 --- a/docs/zh/engines/table-engines/log-family/index.md +++ b/docs/zh/engines/table-engines/log-family/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/log-family/ sidebar_label: 日志引擎系列 sidebar_position: 29 --- diff --git a/docs/zh/engines/table-engines/log-family/log.md b/docs/zh/engines/table-engines/log-family/log.md index c825868bcda..cb4bce204fb 100644 --- a/docs/zh/engines/table-engines/log-family/log.md +++ b/docs/zh/engines/table-engines/log-family/log.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/log-family/log +--- # Log {#log} `Log` 与 `TinyLog` 的不同之处在于,«标记» 的小文件与列文件存在一起。这些标记写在每个数据块上,并且包含偏移量,这些偏移量指示从哪里开始读取文件以便跳过指定的行数。这使得可以在多个线程中读取表数据。对于并发数据访问,可以同时执行读取操作,而写入操作则阻塞读取和其它写入。`Log`引擎不支持索引。同样,如果写入表失败,则该表将被破坏,并且从该表读取将返回错误。`Log`引擎适用于临时数据,write-once 表以及测试或演示目的。 diff --git a/docs/zh/engines/table-engines/log-family/stripelog.md b/docs/zh/engines/table-engines/log-family/stripelog.md index dbb725d3a72..13fedf4c482 100644 --- a/docs/zh/engines/table-engines/log-family/stripelog.md +++ b/docs/zh/engines/table-engines/log-family/stripelog.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/log-family/stripelog +--- # StripeLog {#stripelog} 该引擎属于日志引擎系列。请在[日志引擎系列](index.md)文章中查看引擎的共同属性和差异。 diff --git a/docs/zh/engines/table-engines/log-family/tinylog.md b/docs/zh/engines/table-engines/log-family/tinylog.md index 533e5a89528..e7785be3bff 100644 --- a/docs/zh/engines/table-engines/log-family/tinylog.md +++ b/docs/zh/engines/table-engines/log-family/tinylog.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/log-family/tinylog +--- # TinyLog {#tinylog} 最简单的表引擎,用于将数据存储在磁盘上。每列都存储在单独的压缩文件中。写入时,数据将附加到文件末尾。 diff --git 
a/docs/zh/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/zh/engines/table-engines/mergetree-family/aggregatingmergetree.md index 66c1fd968fe..493ddbc2b25 100644 --- a/docs/zh/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/aggregatingmergetree.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/mergetree-family/aggregatingmergetree +--- # AggregatingMergeTree {#aggregatingmergetree} 该引擎继承自 [MergeTree](mergetree.md),并改变了数据片段的合并逻辑。 ClickHouse 会将一个数据片段内所有具有相同主键(准确的说是 [排序键](../../../engines/table-engines/mergetree-family/mergetree.md))的行替换成一行,这一行会存储一系列聚合函数的状态。 diff --git a/docs/zh/engines/table-engines/mergetree-family/collapsingmergetree.md b/docs/zh/engines/table-engines/mergetree-family/collapsingmergetree.md index 27e8c85a5f3..72d9b1c8367 100644 --- a/docs/zh/engines/table-engines/mergetree-family/collapsingmergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/collapsingmergetree.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/mergetree-family/collapsingmergetree +--- # CollapsingMergeTree {#table_engine-collapsingmergetree} 该引擎继承于 [MergeTree](mergetree.md),并在数据块合并算法中添加了折叠行的逻辑。 diff --git a/docs/zh/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/zh/engines/table-engines/mergetree-family/custom-partitioning-key.md index 8694fe88027..e256f006a42 100644 --- a/docs/zh/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/zh/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/mergetree-family/custom-partitioning-key +--- # 自定义分区键 {#zi-ding-yi-fen-qu-jian} [MergeTree](mergetree.md) 系列的表(包括 [可复制表](replication.md) )可以使用分区。基于 MergeTree 表的 [物化视图](../special/materializedview.md#materializedview) 也支持分区。 diff --git a/docs/zh/engines/table-engines/mergetree-family/graphitemergetree.md b/docs/zh/engines/table-engines/mergetree-family/graphitemergetree.md index ddb833d7ebe..cd452698c24 100644 --- a/docs/zh/engines/table-engines/mergetree-family/graphitemergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/graphitemergetree.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/mergetree-family/graphitemergetree sidebar_position: 38 sidebar_label: GraphiteMergeTree --- diff --git a/docs/zh/engines/table-engines/mergetree-family/index.md b/docs/zh/engines/table-engines/mergetree-family/index.md index 1bead9fd726..8733a936508 100644 --- a/docs/zh/engines/table-engines/mergetree-family/index.md +++ b/docs/zh/engines/table-engines/mergetree-family/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/mergetree-family/ sidebar_label: "合并树家族" sidebar_position: 28 --- diff --git a/docs/zh/engines/table-engines/mergetree-family/mergetree.md b/docs/zh/engines/table-engines/mergetree-family/mergetree.md index 7b723d38594..45a27e16b5b 100644 --- a/docs/zh/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/mergetree.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/mergetree-family/mergetree +--- # MergeTree {#table_engines-mergetree} Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及该系列(`*MergeTree`)中的其他引擎。 diff --git a/docs/zh/engines/table-engines/mergetree-family/replacingmergetree.md b/docs/zh/engines/table-engines/mergetree-family/replacingmergetree.md index 38f4130ab6c..60084c26969 100644 --- a/docs/zh/engines/table-engines/mergetree-family/replacingmergetree.md +++ 
b/docs/zh/engines/table-engines/mergetree-family/replacingmergetree.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/mergetree-family/replacingmergetree +--- # ReplacingMergeTree {#replacingmergetree} 该引擎和 [MergeTree](mergetree.md) 的不同之处在于它会删除排序键值相同的重复项。 diff --git a/docs/zh/engines/table-engines/mergetree-family/replication.md b/docs/zh/engines/table-engines/mergetree-family/replication.md index 69d1cd9d942..2ef02d781bf 100644 --- a/docs/zh/engines/table-engines/mergetree-family/replication.md +++ b/docs/zh/engines/table-engines/mergetree-family/replication.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/mergetree-family/replication +--- # 数据副本 {#table_engines-replication} 只有 MergeTree 系列里的表可支持副本: diff --git a/docs/zh/engines/table-engines/mergetree-family/summingmergetree.md b/docs/zh/engines/table-engines/mergetree-family/summingmergetree.md index daf7867c611..4e4cee28270 100644 --- a/docs/zh/engines/table-engines/mergetree-family/summingmergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/summingmergetree.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/mergetree-family/summingmergetree +--- # SummingMergeTree {#summingmergetree} 该引擎继承自 [MergeTree](mergetree.md)。区别在于,当合并 `SummingMergeTree` 表的数据片段时,ClickHouse 会把所有具有相同主键的行合并为一行,该行包含了被合并的行中具有数值数据类型的列的汇总值。如果主键的组合方式使得单个键值对应于大量的行,则可以显著的减少存储空间并加快数据查询的速度。 diff --git a/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md index fb046e45596..403031a8804 100644 --- a/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree sidebar_position: 37 sidebar_label: "版本折叠MergeTree" --- diff --git a/docs/zh/engines/table-engines/special/buffer.md b/docs/zh/engines/table-engines/special/buffer.md index 5c48d54676e..bf3eb70a980 100644 --- a/docs/zh/engines/table-engines/special/buffer.md +++ b/docs/zh/engines/table-engines/special/buffer.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/special/buffer +--- # 缓冲区 {#buffer} 缓冲数据写入 RAM 中,周期性地将数据刷新到另一个表。在读取操作时,同时从缓冲区和另一个表读取数据。 diff --git a/docs/zh/engines/table-engines/special/dictionary.md b/docs/zh/engines/table-engines/special/dictionary.md index 50aa5f917d7..77cbe695d05 100644 --- a/docs/zh/engines/table-engines/special/dictionary.md +++ b/docs/zh/engines/table-engines/special/dictionary.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/special/dictionary +--- # 字典 {#dictionary} `Dictionary` 引擎将字典数据展示为一个ClickHouse的表。 diff --git a/docs/zh/engines/table-engines/special/distributed.md b/docs/zh/engines/table-engines/special/distributed.md index 5e2ecfd9d8a..8d495d24221 100644 --- a/docs/zh/engines/table-engines/special/distributed.md +++ b/docs/zh/engines/table-engines/special/distributed.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/special/distributed sidebar_position: 33 sidebar_label: 分布式引擎 --- diff --git a/docs/zh/engines/table-engines/special/external-data.md b/docs/zh/engines/table-engines/special/external-data.md index ed44d7ce3b7..fb13359ff32 100644 --- a/docs/zh/engines/table-engines/special/external-data.md +++ b/docs/zh/engines/table-engines/special/external-data.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/special/external-data +--- # 用于查询处理的外部数据 {#external-data-for-query-processing} ClickHouse 允许向服务器发送处理查询所需的数据以及 
SELECT 查询。这些数据放在一个临时表中(请参阅 «临时表» 一节),可以在查询中使用(例如,在 IN 操作符中)。 diff --git a/docs/zh/engines/table-engines/special/file.md b/docs/zh/engines/table-engines/special/file.md index ce4a43bba3f..86e9884dcc5 100644 --- a/docs/zh/engines/table-engines/special/file.md +++ b/docs/zh/engines/table-engines/special/file.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/special/file +--- # 文件(输入格式) {#table_engines-file} 数据源是以 Clickhouse 支持的一种输入格式(TabSeparated,Native等)存储数据的文件。 diff --git a/docs/zh/engines/table-engines/special/generate.md b/docs/zh/engines/table-engines/special/generate.md index 3d4727196fc..c97228361e7 100644 --- a/docs/zh/engines/table-engines/special/generate.md +++ b/docs/zh/engines/table-engines/special/generate.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/special/generate sidebar_position: 46 sidebar_label: 随机数生成 --- diff --git a/docs/zh/engines/table-engines/special/join.md b/docs/zh/engines/table-engines/special/join.md index 03658e3b386..f804905dec4 100644 --- a/docs/zh/engines/table-engines/special/join.md +++ b/docs/zh/engines/table-engines/special/join.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/special/join sidebar_position: 40 sidebar_label: 关联表引擎 --- diff --git a/docs/zh/engines/table-engines/special/materializedview.md b/docs/zh/engines/table-engines/special/materializedview.md index d4c678839b6..875ab04a294 100644 --- a/docs/zh/engines/table-engines/special/materializedview.md +++ b/docs/zh/engines/table-engines/special/materializedview.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/special/materializedview +--- # MaterializedView {#materializedview} 物化视图的使用(更多信息请参阅 [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) )。它需要使用一个不同的引擎来存储数据,这个引擎要在创建物化视图时指定。当从表中读取时,它就会使用该引擎。 diff --git a/docs/zh/engines/table-engines/special/memory.md b/docs/zh/engines/table-engines/special/memory.md index 3058689510c..a55491028ef 100644 --- a/docs/zh/engines/table-engines/special/memory.md +++ b/docs/zh/engines/table-engines/special/memory.md @@ -1,4 +1,5 @@ --- +slug: /zh/engines/table-engines/special/memory sidebar_position: 44 sidebar_label: Memory --- diff --git a/docs/zh/engines/table-engines/special/merge.md b/docs/zh/engines/table-engines/special/merge.md index c29a600bef6..6cd9fe46773 100644 --- a/docs/zh/engines/table-engines/special/merge.md +++ b/docs/zh/engines/table-engines/special/merge.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/special/merge +--- # 合并 {#merge} `Merge` 引擎 (不要跟 `MergeTree` 引擎混淆) 本身不存储数据,但可用于同时从任意多个其他的表中读取数据。 diff --git a/docs/zh/engines/table-engines/special/null.md b/docs/zh/engines/table-engines/special/null.md index 60b91109abe..8a3aa72f90a 100644 --- a/docs/zh/engines/table-engines/special/null.md +++ b/docs/zh/engines/table-engines/special/null.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/special/null +--- # Null {#null} 当写入 Null 类型的表时,将忽略数据。从 Null 类型的表中读取时,返回空。 diff --git a/docs/zh/engines/table-engines/special/set.md b/docs/zh/engines/table-engines/special/set.md index ae522221c14..980a8ffa5ee 100644 --- a/docs/zh/engines/table-engines/special/set.md +++ b/docs/zh/engines/table-engines/special/set.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/special/set +--- # 集合 {#set} 始终存在于 RAM 中的数据集。它适用于IN运算符的右侧(请参见 «IN运算符» 部分)。 diff --git a/docs/zh/engines/table-engines/special/url.md b/docs/zh/engines/table-engines/special/url.md index 41dd887d20a..5ff6cb9a367 100644 --- a/docs/zh/engines/table-engines/special/url.md +++ 
b/docs/zh/engines/table-engines/special/url.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/special/url +--- # URL(URL,格式) {#table_engines-url} 用于管理远程 HTTP/HTTPS 服务器上的数据。该引擎类似 diff --git a/docs/zh/engines/table-engines/special/view.md b/docs/zh/engines/table-engines/special/view.md index fde72ce85b0..af15842a8b9 100644 --- a/docs/zh/engines/table-engines/special/view.md +++ b/docs/zh/engines/table-engines/special/view.md @@ -1,3 +1,6 @@ +--- +slug: /zh/engines/table-engines/special/view +--- # 视图 {#table_engines-view} 用于构建视图(有关更多信息,请参阅 `CREATE VIEW 查询`)。 它不存储数据,仅存储指定的 `SELECT` 查询。 从表中读取时,它会运行此查询(并从查询中删除所有不必要的列)。 diff --git a/docs/zh/faq/general.md b/docs/zh/faq/general.md index 05dad567195..4d84f1d642a 100644 --- a/docs/zh/faq/general.md +++ b/docs/zh/faq/general.md @@ -1,3 +1,6 @@ +--- +slug: /zh/faq/general +--- # 常见问题 {#chang-jian-wen-ti} ## 为什么不使用MapReduce之类的产品呢? {#wei-shi-yao-bu-shi-yong-mapreducezhi-lei-de-chan-pin-ni} diff --git a/docs/zh/faq/general/columnar-database.md b/docs/zh/faq/general/columnar-database.md index 547570ac844..57541aec69b 100644 --- a/docs/zh/faq/general/columnar-database.md +++ b/docs/zh/faq/general/columnar-database.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/general/columnar-database title: 什么是列存储数据库? toc_hidden: true sidebar_position: 101 diff --git a/docs/zh/faq/general/dbms-naming.md b/docs/zh/faq/general/dbms-naming.md index 6725524d58a..e732c2f054e 100644 --- a/docs/zh/faq/general/dbms-naming.md +++ b/docs/zh/faq/general/dbms-naming.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/general/dbms-naming title: "\u201CClickHouse\u201D 有什么含义?" toc_hidden: true sidebar_position: 10 diff --git a/docs/zh/faq/general/how-do-i-contribute-code-to-clickhouse.md b/docs/zh/faq/general/how-do-i-contribute-code-to-clickhouse.md index e7429bde7b8..daa7abf525f 100644 --- a/docs/zh/faq/general/how-do-i-contribute-code-to-clickhouse.md +++ b/docs/zh/faq/general/how-do-i-contribute-code-to-clickhouse.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/general/how-do-i-contribute-code-to-clickhouse title: 我如何为ClickHouse贡献代码? toc_hidden: true sidebar_position: 120 diff --git a/docs/zh/faq/general/index.md b/docs/zh/faq/general/index.md index 041b2f2affa..8b0b42cede2 100644 --- a/docs/zh/faq/general/index.md +++ b/docs/zh/faq/general/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/general/ title: ClickHouse 有关常见问题 toc_hidden_folder: true sidebar_position: 1 diff --git a/docs/zh/faq/general/mapreduce.md b/docs/zh/faq/general/mapreduce.md index fc9b25b56bf..0bac2ae3611 100644 --- a/docs/zh/faq/general/mapreduce.md +++ b/docs/zh/faq/general/mapreduce.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/general/mapreduce title: 为何不使用 MapReduce等技术? toc_hidden: true sidebar_position: 110 diff --git a/docs/zh/faq/general/ne-tormozit.md b/docs/zh/faq/general/ne-tormozit.md index 439d894c390..c4149655108 100644 --- a/docs/zh/faq/general/ne-tormozit.md +++ b/docs/zh/faq/general/ne-tormozit.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/general/ne-tormozit title: "What does \u201C\u043D\u0435 \u0442\u043E\u0440\u043C\u043E\u0437\u0438\u0442\ \u201D mean?" toc_hidden: true diff --git a/docs/zh/faq/general/olap.md b/docs/zh/faq/general/olap.md index f6bab9759da..b014419578b 100644 --- a/docs/zh/faq/general/olap.md +++ b/docs/zh/faq/general/olap.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/general/olap title: What is OLAP? 
toc_hidden: true sidebar_position: 100 diff --git a/docs/zh/faq/general/who-is-using-clickhouse.md b/docs/zh/faq/general/who-is-using-clickhouse.md index 73a48657b65..c5ea6a07307 100644 --- a/docs/zh/faq/general/who-is-using-clickhouse.md +++ b/docs/zh/faq/general/who-is-using-clickhouse.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/general/who-is-using-clickhouse title: 谁在使用 ClickHouse? toc_hidden: true sidebar_position: 9 diff --git a/docs/zh/faq/general/why-clickhouse-is-so-fast.md b/docs/zh/faq/general/why-clickhouse-is-so-fast.md index 9dc8fced514..9183ead34b0 100644 --- a/docs/zh/faq/general/why-clickhouse-is-so-fast.md +++ b/docs/zh/faq/general/why-clickhouse-is-so-fast.md @@ -1,5 +1,6 @@ --- -title: Why ClickHouse is so fast? +slug: /zh/faq/general/why-clickhouse-is-so-fast +title: Why is ClickHouse so fast? toc_hidden: true sidebar_position: 8 --- @@ -17,7 +18,7 @@ Indexes : ClickHouse keeps data structures in memory that allows reading not only used columns but only necessary row ranges of those columns. Data compression -: Storing different values of the same column together often leads to better compression ratios (compared to row-oriented systems) because in real data column often has the same or not so many different values for neighboring rows. In addition to general-purpose compression, ClickHouse supports [specialized codecs](../../sql-reference/statements/create/table/#create-query-specialized-codecs) that can make data even more compact. +: Storing different values of the same column together often leads to better compression ratios (compared to row-oriented systems) because in real data column often has the same or not so many different values for neighboring rows. In addition to general-purpose compression, ClickHouse supports [specialized codecs](../../sql-reference/statements/create/table.md/#create-query-specialized-codecs) that can make data even more compact. Vectorized query execution : ClickHouse not only stores data in columns but also processes data in columns. It leads to better CPU cache utilization and allows for [SIMD](https://en.wikipedia.org/wiki/SIMD) CPU instructions usage. diff --git a/docs/zh/faq/index.md b/docs/zh/faq/index.md index 5a8e88bc95a..4807dce4206 100644 --- a/docs/zh/faq/index.md +++ b/docs/zh/faq/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/ sidebar_label: F.A.Q. toc_hidden: true sidebar_position: 76 diff --git a/docs/zh/faq/integration/file-export.md b/docs/zh/faq/integration/file-export.md index 44981c55cf3..ff2e7dd8a72 100644 --- a/docs/zh/faq/integration/file-export.md +++ b/docs/zh/faq/integration/file-export.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/integration/file-export title: 如何从 ClickHouse 导出数据到一个文件? toc_hidden: true sidebar_position: 10 diff --git a/docs/zh/faq/integration/index.md b/docs/zh/faq/integration/index.md index f1a93643457..6678956a0b3 100644 --- a/docs/zh/faq/integration/index.md +++ b/docs/zh/faq/integration/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/integration/ title: 关于集成ClickHouse和其他系统的问题 toc_hidden_folder: true sidebar_position: 4 diff --git a/docs/zh/faq/integration/json-import.md b/docs/zh/faq/integration/json-import.md index e676a3796cc..861abacc1e1 100644 --- a/docs/zh/faq/integration/json-import.md +++ b/docs/zh/faq/integration/json-import.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/integration/json-import title: How to import JSON into ClickHouse? 
toc_hidden: true sidebar_position: 11 diff --git a/docs/zh/faq/integration/oracle-odbc.md b/docs/zh/faq/integration/oracle-odbc.md index ed21b3a01f7..e22db1d8960 100644 --- a/docs/zh/faq/integration/oracle-odbc.md +++ b/docs/zh/faq/integration/oracle-odbc.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/integration/oracle-odbc title: What if I have a problem with encodings when using Oracle via ODBC? toc_hidden: true sidebar_position: 20 diff --git a/docs/zh/faq/operations/delete-old-data.md b/docs/zh/faq/operations/delete-old-data.md index 65f8de264d3..962fff01c5b 100644 --- a/docs/zh/faq/operations/delete-old-data.md +++ b/docs/zh/faq/operations/delete-old-data.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/operations/delete-old-data title: Is it possible to delete old records from a ClickHouse table? toc_hidden: true sidebar_position: 20 @@ -28,13 +29,13 @@ ClickHouse does not have real-time point deletes like in [OLTP](https://en.wikip This is the most common approach to make your system based on ClickHouse [GDPR](https://gdpr-info.eu)-compliant. -More details on [mutations](../../sql-reference/statements/alter/#alter-mutations). +More details on [mutations](../../sql-reference/statements/alter.md/#alter-mutations). ## DROP PARTITION {#drop-partition} `ALTER TABLE ... DROP PARTITION` provides a cost-efficient way to drop a whole partition. It’s not that flexible and needs proper partitioning scheme configured on table creation, but still covers most common cases. Like mutations need to be executed from an external system for regular use. -More details on [manipulating partitions](../../sql-reference/statements/alter/partition/#alter_drop-partition). +More details on [manipulating partitions](../../sql-reference/statements/alter/partition.md/#alter_drop-partition). ## TRUNCATE {#truncate} diff --git a/docs/zh/faq/operations/index.md b/docs/zh/faq/operations/index.md index 933ffe69cf2..071cc872e4e 100644 --- a/docs/zh/faq/operations/index.md +++ b/docs/zh/faq/operations/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/operations/ title: 关于操作ClickHouse服务器和集群的问题 toc_hidden_folder: true sidebar_position: 3 diff --git a/docs/zh/faq/operations/multi-region-replication.md b/docs/zh/faq/operations/multi-region-replication.md index cd277b6a2c8..05f856a9ea7 100644 --- a/docs/zh/faq/operations/multi-region-replication.md +++ b/docs/zh/faq/operations/multi-region-replication.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/operations/multi-region-replication title: ClickHouse支持多区域复制吗? toc_hidden: true sidebar_position: 30 diff --git a/docs/zh/faq/operations/production.md b/docs/zh/faq/operations/production.md index 88bca0e7ffe..cc5cf6b9614 100644 --- a/docs/zh/faq/operations/production.md +++ b/docs/zh/faq/operations/production.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/operations/production title: Which ClickHouse version to use in production? 
toc_hidden: true sidebar_position: 10 diff --git a/docs/zh/faq/terms_translation_zh.md b/docs/zh/faq/terms_translation_zh.md index d252b4e293e..87a283cd30d 100644 --- a/docs/zh/faq/terms_translation_zh.md +++ b/docs/zh/faq/terms_translation_zh.md @@ -1,4 +1,8 @@ +--- +slug: /zh/faq/terms_translation_zh +--- # 术语翻译约定 + 本文档用来维护从英文翻译成中文的术语集。 diff --git a/docs/zh/faq/use-cases/index.md b/docs/zh/faq/use-cases/index.md index d1adde4a544..75ef26368a3 100644 --- a/docs/zh/faq/use-cases/index.md +++ b/docs/zh/faq/use-cases/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/use-cases/ title: 关于ClickHouse使用案例的问题 toc_hidden_folder: true sidebar_position: 2 diff --git a/docs/zh/faq/use-cases/key-value.md b/docs/zh/faq/use-cases/key-value.md index 79c221d30c8..ab8e2ed7cae 100644 --- a/docs/zh/faq/use-cases/key-value.md +++ b/docs/zh/faq/use-cases/key-value.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/use-cases/key-value title: 我能把 ClickHouse 当做Key-value 键值存储来使用吗? toc_hidden: true sidebar_position: 101 diff --git a/docs/zh/faq/use-cases/time-series.md b/docs/zh/faq/use-cases/time-series.md index 0d38167929a..9a48f483be7 100644 --- a/docs/zh/faq/use-cases/time-series.md +++ b/docs/zh/faq/use-cases/time-series.md @@ -1,4 +1,5 @@ --- +slug: /zh/faq/use-cases/time-series title: 我能把 ClickHouse 当做时序数据库来使用吗? toc_hidden: true sidebar_position: 101 diff --git a/docs/zh/getting-started/example-datasets/amplab-benchmark.md b/docs/zh/getting-started/example-datasets/amplab-benchmark.md index d7194347e23..cb7c109c0c7 100644 --- a/docs/zh/getting-started/example-datasets/amplab-benchmark.md +++ b/docs/zh/getting-started/example-datasets/amplab-benchmark.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/example-datasets/amplab-benchmark sidebar_position: 19 sidebar_label: AMPLab Big Data Benchmark --- diff --git a/docs/zh/getting-started/example-datasets/criteo.md b/docs/zh/getting-started/example-datasets/criteo.md index ba223c8ce0d..453f6a6813f 100644 --- a/docs/zh/getting-started/example-datasets/criteo.md +++ b/docs/zh/getting-started/example-datasets/criteo.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/example-datasets/criteo sidebar_position: 18 sidebar_label: Terabyte Click Logs from Criteo --- diff --git a/docs/zh/getting-started/example-datasets/github-events.md b/docs/zh/getting-started/example-datasets/github-events.md index c99dfb5ff7f..2fb89ba70f8 100644 --- a/docs/zh/getting-started/example-datasets/github-events.md +++ b/docs/zh/getting-started/example-datasets/github-events.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/example-datasets/github-events sidebar_position: 11 sidebar_label: GitHub 事件数据集 --- diff --git a/docs/zh/getting-started/example-datasets/index.md b/docs/zh/getting-started/example-datasets/index.md index bd4f86e1f3b..342b59301c2 100644 --- a/docs/zh/getting-started/example-datasets/index.md +++ b/docs/zh/getting-started/example-datasets/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/example-datasets/ sidebar_label: 示例数据集 sidebar_position: 12 --- diff --git a/docs/zh/getting-started/example-datasets/metrica.md b/docs/zh/getting-started/example-datasets/metrica.md index d788b0f708c..d6ebf12d7b0 100644 --- a/docs/zh/getting-started/example-datasets/metrica.md +++ b/docs/zh/getting-started/example-datasets/metrica.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/example-datasets/metrica sidebar_position: 15 sidebar_label: Yandex.Metrica Data --- diff --git a/docs/zh/getting-started/example-datasets/nyc-taxi.md b/docs/zh/getting-started/example-datasets/nyc-taxi.md index 
8d96156cd95..f7d8a61ab7f 100644 --- a/docs/zh/getting-started/example-datasets/nyc-taxi.md +++ b/docs/zh/getting-started/example-datasets/nyc-taxi.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/example-datasets/nyc-taxi sidebar_position: 20 sidebar_label: New York Taxi Data --- diff --git a/docs/zh/getting-started/example-datasets/ontime.md b/docs/zh/getting-started/example-datasets/ontime.md index 6015c248fb3..5df130a2c41 100644 --- a/docs/zh/getting-started/example-datasets/ontime.md +++ b/docs/zh/getting-started/example-datasets/ontime.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/example-datasets/ontime sidebar_position: 21 sidebar_label: OnTime --- diff --git a/docs/zh/getting-started/example-datasets/star-schema.md b/docs/zh/getting-started/example-datasets/star-schema.md index 47044f7b555..6c9ae353f29 100644 --- a/docs/zh/getting-started/example-datasets/star-schema.md +++ b/docs/zh/getting-started/example-datasets/star-schema.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/example-datasets/star-schema sidebar_position: 16 sidebar_label: Star Schema Benchmark --- diff --git a/docs/zh/getting-started/example-datasets/wikistat.md b/docs/zh/getting-started/example-datasets/wikistat.md index 5722d18ec57..2db921741fd 100644 --- a/docs/zh/getting-started/example-datasets/wikistat.md +++ b/docs/zh/getting-started/example-datasets/wikistat.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/example-datasets/wikistat sidebar_position: 17 sidebar_label: WikiStat --- diff --git a/docs/zh/getting-started/index.md b/docs/zh/getting-started/index.md index 0e7052e9faf..e50eb2f2542 100644 --- a/docs/zh/getting-started/index.md +++ b/docs/zh/getting-started/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/ sidebar_label: 快速上手 sidebar_position: 2 --- diff --git a/docs/zh/getting-started/install.md b/docs/zh/getting-started/install.md index 6a0b47607f5..809e2433823 100644 --- a/docs/zh/getting-started/install.md +++ b/docs/zh/getting-started/install.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/install sidebar_position: 11 sidebar_label: 安装部署 --- @@ -134,8 +135,6 @@ do || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz" done -exit 0 - tar -xzvf "clickhouse-common-static-$LATEST_VERSION-${ARCH}.tgz" \ || tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz" sudo "clickhouse-common-static-$LATEST_VERSION/install/doinst.sh" @@ -146,7 +145,7 @@ sudo "clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh" tar -xzvf "clickhouse-server-$LATEST_VERSION-${ARCH}.tgz" \ || tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz" -sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" +sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" configure sudo /etc/init.d/clickhouse-server start tar -xzvf "clickhouse-client-$LATEST_VERSION-${ARCH}.tgz" \ diff --git a/docs/zh/getting-started/playground.md b/docs/zh/getting-started/playground.md index cd35b282f5d..2874b307cee 100644 --- a/docs/zh/getting-started/playground.md +++ b/docs/zh/getting-started/playground.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/playground sidebar_position: 14 sidebar_label: 体验平台 --- diff --git a/docs/zh/getting-started/tutorial.md b/docs/zh/getting-started/tutorial.md index 5fa77bffa78..83a4ebc80e1 100644 --- a/docs/zh/getting-started/tutorial.md +++ b/docs/zh/getting-started/tutorial.md @@ -1,4 +1,5 @@ --- +slug: /zh/getting-started/tutorial sidebar_position: 12 sidebar_label: 使用教程 --- diff --git a/docs/zh/guides/apply-catboost-model.md 
b/docs/zh/guides/apply-catboost-model.md index e268ac92a8c..861e5372875 100644 --- a/docs/zh/guides/apply-catboost-model.md +++ b/docs/zh/guides/apply-catboost-model.md @@ -1,4 +1,5 @@ --- +slug: /zh/guides/apply-catboost-model sidebar_position: 41 sidebar_label: "\u5E94\u7528CatBoost\u6A21\u578B" --- diff --git a/docs/zh/guides/improving-query-performance/skipping-indexes.md b/docs/zh/guides/improving-query-performance/skipping-indexes.md index b3cb82bf769..2f9ce09d77f 100644 --- a/docs/zh/guides/improving-query-performance/skipping-indexes.md +++ b/docs/zh/guides/improving-query-performance/skipping-indexes.md @@ -1,4 +1,5 @@ --- +slug: /zh/guides/improving-query-performance/skipping-indexes sidebar_label: Data Skipping Indexes sidebar_position: 2 --- diff --git a/docs/zh/guides/improving-query-performance/sparse-primary-indexes.md b/docs/zh/guides/improving-query-performance/sparse-primary-indexes.md index 3d91d75432c..13b4c368a96 100644 --- a/docs/zh/guides/improving-query-performance/sparse-primary-indexes.md +++ b/docs/zh/guides/improving-query-performance/sparse-primary-indexes.md @@ -1,4 +1,5 @@ --- +slug: /zh/guides/improving-query-performance/sparse-primary-indexes sidebar_label: Sparse Primary Indexes sidebar_position: 20 --- diff --git a/docs/zh/guides/index.md b/docs/zh/guides/index.md index 54885ab0a58..5e535ea5736 100644 --- a/docs/zh/guides/index.md +++ b/docs/zh/guides/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/guides/ sidebar_position: 38 sidebar_label: ClickHouse指南 --- diff --git a/docs/zh/index.md b/docs/zh/index.md index eaa1785624b..a331f579dda 100644 --- a/docs/zh/index.md +++ b/docs/zh/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/ sidebar_position: 0 sidebar_label: 什么是ClickHouse? --- diff --git a/docs/zh/interfaces/cli.md b/docs/zh/interfaces/cli.md index e43efb50915..b87d84c8b1c 100644 --- a/docs/zh/interfaces/cli.md +++ b/docs/zh/interfaces/cli.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/cli sidebar_position: 17 sidebar_label: 命令行客户端 --- diff --git a/docs/zh/interfaces/cpp.md b/docs/zh/interfaces/cpp.md index 089de7638c3..3a807c2d68a 100644 --- a/docs/zh/interfaces/cpp.md +++ b/docs/zh/interfaces/cpp.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/cpp sidebar_position: 24 sidebar_label: C++客户端库 --- diff --git a/docs/zh/interfaces/formats.md b/docs/zh/interfaces/formats.md index a7a7d41e123..848931342a8 100644 --- a/docs/zh/interfaces/formats.md +++ b/docs/zh/interfaces/formats.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/formats sidebar_position: 21 sidebar_label: 输入/输出格式 --- diff --git a/docs/zh/interfaces/http.md b/docs/zh/interfaces/http.md index 257f8957bb3..8b956936290 100644 --- a/docs/zh/interfaces/http.md +++ b/docs/zh/interfaces/http.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/http sidebar_position: 19 sidebar_label: HTTP客户端 --- diff --git a/docs/zh/interfaces/index.md b/docs/zh/interfaces/index.md index 16f2c64bb85..0a5c9fe20f4 100644 --- a/docs/zh/interfaces/index.md +++ b/docs/zh/interfaces/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/ sidebar_label: 接口 sidebar_position: 14 --- diff --git a/docs/zh/interfaces/jdbc.md b/docs/zh/interfaces/jdbc.md index a51846dab3d..388c11593c6 100644 --- a/docs/zh/interfaces/jdbc.md +++ b/docs/zh/interfaces/jdbc.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/jdbc sidebar_position: 22 sidebar_label: JDBC驱动 --- diff --git a/docs/zh/interfaces/mysql.md b/docs/zh/interfaces/mysql.md index b0cd47665cb..a6d2aefdcc3 100644 --- a/docs/zh/interfaces/mysql.md +++ b/docs/zh/interfaces/mysql.md @@ -1,4 +1,5 @@ --- +slug: 
/zh/interfaces/mysql sidebar_position: 20 sidebar_label: MySQL接口 --- diff --git a/docs/zh/interfaces/odbc.md b/docs/zh/interfaces/odbc.md index dec7480a519..30d3b27249a 100644 --- a/docs/zh/interfaces/odbc.md +++ b/docs/zh/interfaces/odbc.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/odbc sidebar_position: 23 sidebar_label: ODBC驱动 --- diff --git a/docs/zh/interfaces/tcp.md b/docs/zh/interfaces/tcp.md index 7d5092ed7be..230feadaf40 100644 --- a/docs/zh/interfaces/tcp.md +++ b/docs/zh/interfaces/tcp.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/tcp sidebar_position: 18 sidebar_label: 原生接口(TCP) --- diff --git a/docs/zh/interfaces/third-party/client-libraries.md b/docs/zh/interfaces/third-party/client-libraries.md index 8ed482eee73..d4959e37668 100644 --- a/docs/zh/interfaces/third-party/client-libraries.md +++ b/docs/zh/interfaces/third-party/client-libraries.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/third-party/client-libraries sidebar_position: 26 sidebar_label: 客户端开发库 --- diff --git a/docs/zh/interfaces/third-party/gui.md b/docs/zh/interfaces/third-party/gui.md index fd82792d116..75dd4d7f2ec 100644 --- a/docs/zh/interfaces/third-party/gui.md +++ b/docs/zh/interfaces/third-party/gui.md @@ -1,3 +1,6 @@ +--- +slug: /zh/interfaces/third-party/gui +--- # 第三方开发的可视化界面 {#di-san-fang-kai-fa-de-ke-shi-hua-jie-mian} ## 开源 {#kai-yuan} diff --git a/docs/zh/interfaces/third-party/index.md b/docs/zh/interfaces/third-party/index.md index e00e347ce50..0c6b194d04a 100644 --- a/docs/zh/interfaces/third-party/index.md +++ b/docs/zh/interfaces/third-party/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/third-party/ sidebar_label: 第三方工具 sidebar_position: 24 --- diff --git a/docs/zh/interfaces/third-party/integrations.md b/docs/zh/interfaces/third-party/integrations.md index 81b3694b4a2..94d9cc7bc09 100644 --- a/docs/zh/interfaces/third-party/integrations.md +++ b/docs/zh/interfaces/third-party/integrations.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/third-party/integrations sidebar_position: 27 sidebar_label: 第三方集成库 --- diff --git a/docs/zh/interfaces/third-party/proxy.md b/docs/zh/interfaces/third-party/proxy.md index e6c744ea317..375780788df 100644 --- a/docs/zh/interfaces/third-party/proxy.md +++ b/docs/zh/interfaces/third-party/proxy.md @@ -1,4 +1,5 @@ --- +slug: /zh/interfaces/third-party/proxy sidebar_position: 29 sidebar_label: 第三方代理 --- diff --git a/docs/zh/introduction/adopters.md b/docs/zh/introduction/adopters.md index 2219894df5c..4e6fe76b175 100644 --- a/docs/zh/introduction/adopters.md +++ b/docs/zh/introduction/adopters.md @@ -1,4 +1,5 @@ --- +slug: /zh/introduction/adopters sidebar_position: 5 sidebar_label: "ClickHouse用户" --- diff --git a/docs/zh/introduction/distinctive-features.md b/docs/zh/introduction/distinctive-features.md index 1f2b970f03d..c97ab082c09 100644 --- a/docs/zh/introduction/distinctive-features.md +++ b/docs/zh/introduction/distinctive-features.md @@ -1,4 +1,5 @@ --- +slug: /zh/introduction/distinctive-features sidebar_position: 2 sidebar_label: ClickHouse的特性 --- diff --git a/docs/zh/introduction/history.md b/docs/zh/introduction/history.md index ac2ad9a58ce..1986126b761 100644 --- a/docs/zh/introduction/history.md +++ b/docs/zh/introduction/history.md @@ -1,4 +1,5 @@ --- +slug: /zh/introduction/history sidebar_position: 4 sidebar_label: ClickHouse历史 --- diff --git a/docs/zh/introduction/performance.md b/docs/zh/introduction/performance.md index 60652f82940..32ecb4265e3 100644 --- a/docs/zh/introduction/performance.md +++ b/docs/zh/introduction/performance.md @@ -1,4 
+1,5 @@ --- +slug: /zh/introduction/performance sidebar_position: 3 sidebar_label: ClickHouse性能 --- diff --git a/docs/zh/operations/access-rights.md b/docs/zh/operations/access-rights.md index 0a32853c436..da5a9058eed 100644 --- a/docs/zh/operations/access-rights.md +++ b/docs/zh/operations/access-rights.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/access-rights sidebar_position: 48 sidebar_label: "访问权限和账户管理" --- diff --git a/docs/zh/operations/backup.md b/docs/zh/operations/backup.md index e6744228c82..dcc9acfcfe0 100644 --- a/docs/zh/operations/backup.md +++ b/docs/zh/operations/backup.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/backup sidebar_position: 49 sidebar_label: "\u6570\u636E\u5907\u4EFD" --- diff --git a/docs/zh/operations/clickhouse-keeper.md b/docs/zh/operations/clickhouse-keeper.md index a18930f32ab..3210c1322ff 100644 --- a/docs/zh/operations/clickhouse-keeper.md +++ b/docs/zh/operations/clickhouse-keeper.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/clickhouse-keeper sidebar_position: 66 sidebar_label: ClickHouse Keeper --- diff --git a/docs/zh/operations/configuration-files.md b/docs/zh/operations/configuration-files.md index c99b8fcfca3..30ed2cafde5 100644 --- a/docs/zh/operations/configuration-files.md +++ b/docs/zh/operations/configuration-files.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/configuration-files +--- # 配置文件 {#configuration_files} ClickHouse支持多配置文件管理。主配置文件是`/etc/clickhouse-server/config.xml`。其余文件须在目录`/etc/clickhouse-server/config.d`。 diff --git a/docs/zh/operations/external-authenticators/kerberos.md b/docs/zh/operations/external-authenticators/kerberos.md index 5d2122c5fea..649a0b9bd48 100644 --- a/docs/zh/operations/external-authenticators/kerberos.md +++ b/docs/zh/operations/external-authenticators/kerberos.md @@ -1,4 +1,8 @@ +--- +slug: /zh/operations/external-authenticators/kerberos +--- # Kerberos认证 {#external-authenticators-kerberos} + 现有正确配置的 ClickHouse 用户可以通过 Kerberos 身份验证协议进行身份验证. 目前, Kerberos 只能用作现有用户的外部身份验证器,这些用户在 `users.xml` 或本地访问控制路径中定义. 
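For context on the kerberos.md page touched above: per the documentation it carries, marking an existing user as Kerberos-authenticated is a small `users.xml` change. A rough sketch of such an entry, where the user name and realm are placeholders and the server-side `<kerberos>` section must also be configured (not shown here):

```xml
<!-- users.xml sketch: "my_user" and EXAMPLE.COM are placeholders. -->
<clickhouse>
    <users>
        <my_user>
            <!-- Delegate authentication to Kerberos instead of a password;
                 only principals from the given realm are accepted. -->
            <kerberos>
                <realm>EXAMPLE.COM</realm>
            </kerberos>
        </my_user>
    </users>
</clickhouse>
```
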
diff --git a/docs/zh/operations/index.md b/docs/zh/operations/index.md index d8ec8ec2b27..e66d35db62c 100644 --- a/docs/zh/operations/index.md +++ b/docs/zh/operations/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/ sidebar_position: 43 sidebar_label: "操作" --- diff --git a/docs/zh/operations/monitoring.md b/docs/zh/operations/monitoring.md index 452449806ea..3f302c47e35 100644 --- a/docs/zh/operations/monitoring.md +++ b/docs/zh/operations/monitoring.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/monitoring sidebar_position: 45 sidebar_label: "监控" --- diff --git a/docs/zh/operations/optimizing-performance/sampling-query-profiler.md b/docs/zh/operations/optimizing-performance/sampling-query-profiler.md index 8114184af8b..4206274ec0d 100644 --- a/docs/zh/operations/optimizing-performance/sampling-query-profiler.md +++ b/docs/zh/operations/optimizing-performance/sampling-query-profiler.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/optimizing-performance/sampling-query-profiler machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 54 diff --git a/docs/zh/operations/performance-test.md b/docs/zh/operations/performance-test.md index 005e2a9d657..524b6140a10 100644 --- a/docs/zh/operations/performance-test.md +++ b/docs/zh/operations/performance-test.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/performance-test sidebar_position: 54 sidebar_label: "\u6D4B\u8BD5\u786C\u4EF6" --- diff --git a/docs/zh/operations/quotas.md b/docs/zh/operations/quotas.md index 6bbaf0cb6fd..55cdb7de168 100644 --- a/docs/zh/operations/quotas.md +++ b/docs/zh/operations/quotas.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/quotas +--- # 配额 {#quotas} 配额允许您在一段时间内限制资源使用情况,或者只是跟踪资源的使用。 diff --git a/docs/zh/operations/requirements.md b/docs/zh/operations/requirements.md index 79af49cfd4e..0bee135bd92 100644 --- a/docs/zh/operations/requirements.md +++ b/docs/zh/operations/requirements.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/requirements sidebar_position: 44 sidebar_label: "必备条件" --- diff --git a/docs/zh/operations/server-configuration-parameters/index.md b/docs/zh/operations/server-configuration-parameters/index.md index 38ee76ee05d..1131d7fbd4b 100644 --- a/docs/zh/operations/server-configuration-parameters/index.md +++ b/docs/zh/operations/server-configuration-parameters/index.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/server-configuration-parameters/ +--- # 服务器配置参数 {#server-settings} 本节包含无法在会话或查询级别更改的服务器设置的说明。 diff --git a/docs/zh/operations/server-configuration-parameters/settings.md b/docs/zh/operations/server-configuration-parameters/settings.md index f5fe3aea3e7..e706d524145 100644 --- a/docs/zh/operations/server-configuration-parameters/settings.md +++ b/docs/zh/operations/server-configuration-parameters/settings.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/server-configuration-parameters/settings sidebar_position: 57 sidebar_label: "\u670D\u52A1\u5668\u8BBE\u7F6E" --- diff --git a/docs/zh/operations/settings/constraints-on-settings.md b/docs/zh/operations/settings/constraints-on-settings.md index 19d724b483a..dafc89706f6 100644 --- a/docs/zh/operations/settings/constraints-on-settings.md +++ b/docs/zh/operations/settings/constraints-on-settings.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/settings/constraints-on-settings machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 62 diff --git a/docs/zh/operations/settings/permissions-for-queries.md b/docs/zh/operations/settings/permissions-for-queries.md 
index 5aa579fb0cf..b5c858e4591 100644 --- a/docs/zh/operations/settings/permissions-for-queries.md +++ b/docs/zh/operations/settings/permissions-for-queries.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/settings/permissions-for-queries sidebar_position: 58 sidebar_label: "\u67E5\u8BE2\u6743\u9650" --- diff --git a/docs/zh/operations/settings/query-complexity.md b/docs/zh/operations/settings/query-complexity.md index 77f4a3729e1..146825d85fa 100644 --- a/docs/zh/operations/settings/query-complexity.md +++ b/docs/zh/operations/settings/query-complexity.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/settings/query-complexity +--- # 查询复杂性的限制 {#restrictions-on-query-complexity} 对查询复杂性的限制是设置的一部分。 diff --git a/docs/zh/operations/settings/settings-profiles.md b/docs/zh/operations/settings/settings-profiles.md index 0a88b7ff773..f562c583d09 100644 --- a/docs/zh/operations/settings/settings-profiles.md +++ b/docs/zh/operations/settings/settings-profiles.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/settings/settings-profiles sidebar_position: 61 sidebar_label: "\u8BBE\u7F6E\u914D\u7F6E" --- diff --git a/docs/zh/operations/settings/settings-users.md b/docs/zh/operations/settings/settings-users.md index a7284ac4644..58caff7309e 100644 --- a/docs/zh/operations/settings/settings-users.md +++ b/docs/zh/operations/settings/settings-users.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/settings/settings-users machine_translated: false machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 63 diff --git a/docs/zh/operations/system-tables/asynchronous_metric_log.md b/docs/zh/operations/system-tables/asynchronous_metric_log.md index ba37713ac44..419ad2a7ed6 100644 --- a/docs/zh/operations/system-tables/asynchronous_metric_log.md +++ b/docs/zh/operations/system-tables/asynchronous_metric_log.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/asynchronous_metric_log +--- ## system.asynchronous_metric_log {#system-tables-async-log} 包含每分钟记录一次的 `system.asynchronous_metrics`历史值。默认开启。 diff --git a/docs/zh/operations/system-tables/asynchronous_metrics.md b/docs/zh/operations/system-tables/asynchronous_metrics.md index 0303c408497..055f141641c 100644 --- a/docs/zh/operations/system-tables/asynchronous_metrics.md +++ b/docs/zh/operations/system-tables/asynchronous_metrics.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/asynchronous_metrics +--- # system.asynchronous_metrics {#system_tables-asynchronous_metrics} 包含在后台定期计算的指标。 例如,在使用的RAM量。 diff --git a/docs/zh/operations/system-tables/clusters.md b/docs/zh/operations/system-tables/clusters.md index e41aa2dd8fc..ac4d73be900 100644 --- a/docs/zh/operations/system-tables/clusters.md +++ b/docs/zh/operations/system-tables/clusters.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/clusters +--- # system.clusters{#system-clusters} 包含有关配置文件中可用的集群及其中的服务器的信息。 diff --git a/docs/zh/operations/system-tables/columns.md b/docs/zh/operations/system-tables/columns.md index 6d4299a9056..1151596e0d4 100644 --- a/docs/zh/operations/system-tables/columns.md +++ b/docs/zh/operations/system-tables/columns.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/columns +--- # system.columns {#system-columns} 此系统表包含所有表中列的信息。 diff --git a/docs/zh/operations/system-tables/contributors.md b/docs/zh/operations/system-tables/contributors.md index fd876da3594..54ed6dfddbf 100644 --- a/docs/zh/operations/system-tables/contributors.md +++ b/docs/zh/operations/system-tables/contributors.md @@ -1,3 +1,6 @@ +--- +slug: 
/zh/operations/system-tables/contributors +--- # system.contributors {#system-contributors} 此系统表包含有关贡献者的信息。排列顺序是在查询执行时随机生成的。 diff --git a/docs/zh/operations/system-tables/crash-log.md b/docs/zh/operations/system-tables/crash-log.md index bd5c97937de..d0ed406fa0c 100644 --- a/docs/zh/operations/system-tables/crash-log.md +++ b/docs/zh/operations/system-tables/crash-log.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/crash-log +--- # system.crash_log {#system-tables_crash_log} 包含有关致命错误堆栈跟踪的信息.该表默认不存在于数据库中, 仅在发生致命错误时才创建. diff --git a/docs/zh/operations/system-tables/current-roles.md b/docs/zh/operations/system-tables/current-roles.md index 8b9ddec2e0b..29e12488290 100644 --- a/docs/zh/operations/system-tables/current-roles.md +++ b/docs/zh/operations/system-tables/current-roles.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/current-roles +--- # system.current_roles {#system_tables-current_roles} 包含当前用户的激活角色. `SET ROLE` 修改该表的内容. diff --git a/docs/zh/operations/system-tables/data_skipping_indices.md b/docs/zh/operations/system-tables/data_skipping_indices.md index 3278a7f830f..11ffd0b3917 100644 --- a/docs/zh/operations/system-tables/data_skipping_indices.md +++ b/docs/zh/operations/system-tables/data_skipping_indices.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/data_skipping_indices +--- # system.data_skipping_indices {#system-data-skipping-indices} 包含有关所有表中现有数据跳过索引的信息. diff --git a/docs/zh/operations/system-tables/data_type_families.md b/docs/zh/operations/system-tables/data_type_families.md index b1d114a6df0..18e9455476d 100644 --- a/docs/zh/operations/system-tables/data_type_families.md +++ b/docs/zh/operations/system-tables/data_type_families.md @@ -1,4 +1,6 @@ - +--- +slug: /zh/operations/system-tables/data_type_families +--- # system.data_type_families {#system_tables-data_type_families} 包含有关受支持的[数据类型](../../sql-reference/data-types/)的信息. 
diff --git a/docs/zh/operations/system-tables/databases.md b/docs/zh/operations/system-tables/databases.md index 3fadb02446d..41d62701e7d 100644 --- a/docs/zh/operations/system-tables/databases.md +++ b/docs/zh/operations/system-tables/databases.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/databases +--- # system.databases {#system-databases} 包含当前用户可用的数据库的相关信息。 diff --git a/docs/zh/operations/system-tables/detached_parts.md b/docs/zh/operations/system-tables/detached_parts.md index efcbb61d37e..f1c09dd73e0 100644 --- a/docs/zh/operations/system-tables/detached_parts.md +++ b/docs/zh/operations/system-tables/detached_parts.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/detached_parts +--- # system.detached_parts {#system_tables-detached_parts} 包含关于 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 表的分离分区的信息。`reason` 列详细说明了该分区被分离的原因。 diff --git a/docs/zh/operations/system-tables/dictionaries.md b/docs/zh/operations/system-tables/dictionaries.md index 6cfe71de3cb..105a591cf69 100644 --- a/docs/zh/operations/system-tables/dictionaries.md +++ b/docs/zh/operations/system-tables/dictionaries.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/dictionaries machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/disks.md b/docs/zh/operations/system-tables/disks.md index 2a6dcc9ae45..4c3ad27e944 100644 --- a/docs/zh/operations/system-tables/disks.md +++ b/docs/zh/operations/system-tables/disks.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/disks +--- # system.disks {#system_tables-disks} 包含在 [服务器配置](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure) 中定义的磁盘信息. diff --git a/docs/zh/operations/system-tables/distributed_ddl_queue.md b/docs/zh/operations/system-tables/distributed_ddl_queue.md index 5237673d5d0..a983125bf71 100644 --- a/docs/zh/operations/system-tables/distributed_ddl_queue.md +++ b/docs/zh/operations/system-tables/distributed_ddl_queue.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/distributed_ddl_queue +--- # system.distributed_ddl_queue {#system_tables-distributed_ddl_queue} 包含有关在集群上执行的[分布式ddl查询(集群环境)](../../sql-reference/distributed-ddl.md)的信息. diff --git a/docs/zh/operations/system-tables/distribution_queue.md b/docs/zh/operations/system-tables/distribution_queue.md index 2317e0677a7..09958f517f1 100644 --- a/docs/zh/operations/system-tables/distribution_queue.md +++ b/docs/zh/operations/system-tables/distribution_queue.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/distribution_queue +--- # system.distribution_queue {#system_tables-distribution_queue} 包含关于队列中要发送到分片的本地文件的信息. 这些本地文件包含通过以异步模式将新数据插入到Distributed表中而创建的新部分. diff --git a/docs/zh/operations/system-tables/enabled-roles.md b/docs/zh/operations/system-tables/enabled-roles.md index f02c8ec1e65..c22f0d4e114 100644 --- a/docs/zh/operations/system-tables/enabled-roles.md +++ b/docs/zh/operations/system-tables/enabled-roles.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/enabled-roles +--- # system.enabled_roles {#system_tables-enabled_roles} 包含当前所有活动角色, 包括当前用户的当前角色和当前角色的已授予角色. 
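Because the change is this uniform, it can be audited straight from the patch: each added `+slug: ...` line belongs to the file named by the nearest preceding `+++ b/...` header. A small illustrative pairing script follows (the name and I/O convention are assumptions, not project tooling); it reads a unified diff from stdin.

```python
import re
import sys

# Pair each added "+slug: ..." line with the file from the preceding "+++ b/..." header.
current_file = None
for line in sys.stdin:
    header = re.match(r"\+\+\+ b/(\S+)", line)
    if header:
        current_file = header.group(1)
        continue
    added = re.match(r"\+slug: (\S+)", line)
    if added and current_file:
        print(f"{current_file}\t{added.group(1)}")
```

Piping `git show <commit>` through it yields one `path<TAB>slug` pair per migrated page, so a slug that disagrees with its path stands out immediately.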
diff --git a/docs/zh/operations/system-tables/errors.md b/docs/zh/operations/system-tables/errors.md index 4d7baafb865..2b58b94adee 100644 --- a/docs/zh/operations/system-tables/errors.md +++ b/docs/zh/operations/system-tables/errors.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/errors +--- # system.errors {#system_tables-errors} 包含错误代码和它们被触发的次数. diff --git a/docs/zh/operations/system-tables/events.md b/docs/zh/operations/system-tables/events.md index e7ce47116bf..2ff050493e8 100644 --- a/docs/zh/operations/system-tables/events.md +++ b/docs/zh/operations/system-tables/events.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/events machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/functions.md b/docs/zh/operations/system-tables/functions.md index 75df1f65c1f..7e0d2901df8 100644 --- a/docs/zh/operations/system-tables/functions.md +++ b/docs/zh/operations/system-tables/functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/functions +--- # system.functions {#system-functions} 包含有关常规函数和聚合函数的信息。 diff --git a/docs/zh/operations/system-tables/grants.md b/docs/zh/operations/system-tables/grants.md index 8795f5ebf19..fce47fcc0d5 100644 --- a/docs/zh/operations/system-tables/grants.md +++ b/docs/zh/operations/system-tables/grants.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/grants +--- # system.grants {#system_tables-grants} 授予ClickHouse用户帐户的权限. diff --git a/docs/zh/operations/system-tables/graphite_retentions.md b/docs/zh/operations/system-tables/graphite_retentions.md index e61f58496d0..ba1b78f8f8f 100644 --- a/docs/zh/operations/system-tables/graphite_retentions.md +++ b/docs/zh/operations/system-tables/graphite_retentions.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/graphite_retentions machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/index.md b/docs/zh/operations/system-tables/index.md index d68e486b23e..36f862c0777 100644 --- a/docs/zh/operations/system-tables/index.md +++ b/docs/zh/operations/system-tables/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/ machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 sidebar_position: 52 diff --git a/docs/zh/operations/system-tables/licenses.md b/docs/zh/operations/system-tables/licenses.md index 7e59ba865f5..a2e9a59c0b5 100644 --- a/docs/zh/operations/system-tables/licenses.md +++ b/docs/zh/operations/system-tables/licenses.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/licenses +--- # system.licenses {#system-tables_system.licenses} 包含位于 ClickHouse 源的 [contrib](https://github.com/ClickHouse/ClickHouse/tree/master/contrib) 目录中的第三方库的许可证. 
diff --git a/docs/zh/operations/system-tables/merge_tree_settings.md b/docs/zh/operations/system-tables/merge_tree_settings.md index 48d9a7dd9af..38928ce8630 100644 --- a/docs/zh/operations/system-tables/merge_tree_settings.md +++ b/docs/zh/operations/system-tables/merge_tree_settings.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/merge_tree_settings +--- # system.merge_tree_settings {#system-merge_tree_settings} 包含 `MergeTree` 表的设置 (Setting) 信息。 diff --git a/docs/zh/operations/system-tables/merges.md b/docs/zh/operations/system-tables/merges.md index f5cf2a56118..892f9de07ce 100644 --- a/docs/zh/operations/system-tables/merges.md +++ b/docs/zh/operations/system-tables/merges.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/merges machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/metric_log.md b/docs/zh/operations/system-tables/metric_log.md index 5eb038feea7..a9e5c8511e3 100644 --- a/docs/zh/operations/system-tables/metric_log.md +++ b/docs/zh/operations/system-tables/metric_log.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/metric_log machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/metrics.md b/docs/zh/operations/system-tables/metrics.md index 4b1a66dcaad..39453f247a0 100644 --- a/docs/zh/operations/system-tables/metrics.md +++ b/docs/zh/operations/system-tables/metrics.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/metrics +--- # system.metrics {#system_tables-metrics} 此系统表包含可以即时计算或具有当前值的指标。例如,同时处理的查询数量或当前的复制延迟。这个表始终是最新的。 diff --git a/docs/zh/operations/system-tables/mutations.md b/docs/zh/operations/system-tables/mutations.md index 8620436b8e3..dbce0a59063 100644 --- a/docs/zh/operations/system-tables/mutations.md +++ b/docs/zh/operations/system-tables/mutations.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/mutations +--- # system.mutations {#system_tables-mutations} 该表包含关于MergeTree表的[mutation](../../sql-reference/statements/alter.md#alter-mutations)及其进度信息 。每条mutation命令都用一行来表示。 @@ -47,4 +50,4 @@ - [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 表引擎 - [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) 族 -[Original article](https://clickhouse.com/docs/en/operations/system_tables/mutations) \ No newline at end of file +[Original article](https://clickhouse.com/docs/en/operations/system_tables/mutations) diff --git a/docs/zh/operations/system-tables/numbers.md b/docs/zh/operations/system-tables/numbers.md index fd67baa01a5..dd942d5b3c1 100644 --- a/docs/zh/operations/system-tables/numbers.md +++ b/docs/zh/operations/system-tables/numbers.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/numbers +--- # system.numbers {#system-numbers} 这个表有一个名为 `number` 的 UInt64 列,包含了几乎所有从 0 开始的自然数。 diff --git a/docs/zh/operations/system-tables/numbers_mt.md b/docs/zh/operations/system-tables/numbers_mt.md index cf1c96acaab..9663d1507ce 100644 --- a/docs/zh/operations/system-tables/numbers_mt.md +++ b/docs/zh/operations/system-tables/numbers_mt.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/numbers_mt +--- # system.numbers_mt {#system-numbers-mt} 与[system.numbers](../../operations/system-tables/numbers.md)相似,但读取是并行的。 这些数字可以以任何顺序返回。 diff --git a/docs/zh/operations/system-tables/one.md b/docs/zh/operations/system-tables/one.md index 79d2c0199d8..18d699c03c9 100644 --- 
a/docs/zh/operations/system-tables/one.md +++ b/docs/zh/operations/system-tables/one.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/one +--- # system.one {#system-one} 此表包含一行只有一个值为 0 的 `dummy` UInt8 列的数据。 diff --git a/docs/zh/operations/system-tables/opentelemetry_span_log.md b/docs/zh/operations/system-tables/opentelemetry_span_log.md index 6eceb3da889..514ad854e17 100644 --- a/docs/zh/operations/system-tables/opentelemetry_span_log.md +++ b/docs/zh/operations/system-tables/opentelemetry_span_log.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/opentelemetry_span_log +--- # system.opentelemetry_span_log {#system_tables-opentelemetry_span_log} 包含已执行查询的[跟踪范围](https://opentracing.io/docs/overview/spans/)的信息. diff --git a/docs/zh/operations/system-tables/part_log.md b/docs/zh/operations/system-tables/part_log.md index e316531dbd9..cd0bfa272ea 100644 --- a/docs/zh/operations/system-tables/part_log.md +++ b/docs/zh/operations/system-tables/part_log.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/part_log machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/parts.md b/docs/zh/operations/system-tables/parts.md index dc98288305f..ea442194a21 100644 --- a/docs/zh/operations/system-tables/parts.md +++ b/docs/zh/operations/system-tables/parts.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/parts +--- # system.parts {#system_tables-parts} 此系统表包含 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 表分区的相关信息。 diff --git a/docs/zh/operations/system-tables/parts_columns.md b/docs/zh/operations/system-tables/parts_columns.md index d603f251d30..2896b13adec 100644 --- a/docs/zh/operations/system-tables/parts_columns.md +++ b/docs/zh/operations/system-tables/parts_columns.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/parts_columns +--- # system.parts_columns {#system_tables-parts_columns} 包含关于[MergeTree](../../engines/table-engines/mergetree-family/mergetree.md)表的部分和列的信息. 
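A few hunks in this run, such as the `mutations.md` one above, also repair files that previously ended without a final newline (git's `\ No newline at end of file` marker). Normalising that is a one-line fix per file; a minimal sketch, with the glob assumed:

```python
from pathlib import Path

for page in Path("docs/zh").rglob("*.md"):
    text = page.read_text(encoding="utf-8")
    if text and not text.endswith("\n"):
        # Append the missing final newline so git stops flagging the file.
        page.write_text(text + "\n", encoding="utf-8")
```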
diff --git a/docs/zh/operations/system-tables/processes.md b/docs/zh/operations/system-tables/processes.md index 5ac74a29ee3..ec3683597bb 100644 --- a/docs/zh/operations/system-tables/processes.md +++ b/docs/zh/operations/system-tables/processes.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/processes machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/query_log.md b/docs/zh/operations/system-tables/query_log.md index b7661b73a50..93e5771d4b5 100644 --- a/docs/zh/operations/system-tables/query_log.md +++ b/docs/zh/operations/system-tables/query_log.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/query_log machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/query_thread_log.md b/docs/zh/operations/system-tables/query_thread_log.md index 64f9ed27393..8a41c1501a6 100644 --- a/docs/zh/operations/system-tables/query_thread_log.md +++ b/docs/zh/operations/system-tables/query_thread_log.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/query_thread_log +--- # 系统。query_thread_log {#system_tables-query_thread_log} 包含有关执行查询的线程的信息,例如,线程名称、线程开始时间、查询处理的持续时间。 diff --git a/docs/zh/operations/system-tables/query_views_log.md b/docs/zh/operations/system-tables/query_views_log.md index 57946e5104a..3fbffab1adc 100644 --- a/docs/zh/operations/system-tables/query_views_log.md +++ b/docs/zh/operations/system-tables/query_views_log.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/query_views_log +--- # system.query_views_log {#system_tables-query_views_log} 包含有关运行查询时执行的从属视图的信息,例如视图类型或执行时间. diff --git a/docs/zh/operations/system-tables/quota_limits.md b/docs/zh/operations/system-tables/quota_limits.md index 67a2a01eb71..804f635f012 100644 --- a/docs/zh/operations/system-tables/quota_limits.md +++ b/docs/zh/operations/system-tables/quota_limits.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/quota_limits +--- # system.quota_limits {#system_tables-quota_limits} 包含关于所有配额的所有间隔的最大值的信息. 任何行数或0行都可以对应一个配额. diff --git a/docs/zh/operations/system-tables/quota_usage.md b/docs/zh/operations/system-tables/quota_usage.md index 020d14aee5e..2134b9f3cd6 100644 --- a/docs/zh/operations/system-tables/quota_usage.md +++ b/docs/zh/operations/system-tables/quota_usage.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/quota_usage +--- # system.quota_usage {#system_tables-quota_usage} 当前用户的配额使用情况: 使用了多少, 还剩多少. diff --git a/docs/zh/operations/system-tables/quotas.md b/docs/zh/operations/system-tables/quotas.md index b25c213bdd1..611c6a238e3 100644 --- a/docs/zh/operations/system-tables/quotas.md +++ b/docs/zh/operations/system-tables/quotas.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/quotas +--- # system.quotas {#system_tables-quotas} 包含 [quotas](../../operations/system-tables/quotas.md) 信息. diff --git a/docs/zh/operations/system-tables/quotas_usage.md b/docs/zh/operations/system-tables/quotas_usage.md index f554539ecfe..f261cfb9f31 100644 --- a/docs/zh/operations/system-tables/quotas_usage.md +++ b/docs/zh/operations/system-tables/quotas_usage.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/quotas_usage +--- # system.quotas_usage {#system_tables-quotas_usage} 所有用户配额使用情况. 
diff --git a/docs/zh/operations/system-tables/replicas.md b/docs/zh/operations/system-tables/replicas.md index b5bf0c891d4..2845549d511 100644 --- a/docs/zh/operations/system-tables/replicas.md +++ b/docs/zh/operations/system-tables/replicas.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/replicas machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/replicated_fetches.md b/docs/zh/operations/system-tables/replicated_fetches.md index fededbaf28d..84aecd2bd2a 100644 --- a/docs/zh/operations/system-tables/replicated_fetches.md +++ b/docs/zh/operations/system-tables/replicated_fetches.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/replicated_fetches +--- # system.replicated_fetches {#system_tables-replicated_fetches} 包含当前正在运行的后台提取的信息. diff --git a/docs/zh/operations/system-tables/replication_queue.md b/docs/zh/operations/system-tables/replication_queue.md index 0c9e19f76d0..e82569e378d 100644 --- a/docs/zh/operations/system-tables/replication_queue.md +++ b/docs/zh/operations/system-tables/replication_queue.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/replication_queue +--- # system.replication_queue {#system_tables-replication_queue} 包含用于 `ReplicatedMergeTree` 系列表的复制队列中存储在ZooKeeper中的任务的信息. diff --git a/docs/zh/operations/system-tables/role-grants.md b/docs/zh/operations/system-tables/role-grants.md index 8957c73df73..ab2f9200d74 100644 --- a/docs/zh/operations/system-tables/role-grants.md +++ b/docs/zh/operations/system-tables/role-grants.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/role-grants +--- #system.role_grants {#system_tables-role_grants} 包含用户和角色的角色授予. 向该表添加项, 请使用`GRANT role TO user`. diff --git a/docs/zh/operations/system-tables/roles.md b/docs/zh/operations/system-tables/roles.md index c3537b978e4..fd0c9012566 100644 --- a/docs/zh/operations/system-tables/roles.md +++ b/docs/zh/operations/system-tables/roles.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/roles +--- # system.roles {#system_tables-roles} 包含有关已配置的 [角色](../../operations/access-rights.md#role-management) 信息. diff --git a/docs/zh/operations/system-tables/row_policies.md b/docs/zh/operations/system-tables/row_policies.md index 4eaf291cc68..792eaca91b1 100644 --- a/docs/zh/operations/system-tables/row_policies.md +++ b/docs/zh/operations/system-tables/row_policies.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/row_policies +--- # system.row_policies {#system_tables-row_policies} 包含一个特定表的过滤器, 以及应该使用此行策略的角色和/或用户列表. 
diff --git a/docs/zh/operations/system-tables/settings.md b/docs/zh/operations/system-tables/settings.md index 144eb0179c4..3c20fbfa5ca 100644 --- a/docs/zh/operations/system-tables/settings.md +++ b/docs/zh/operations/system-tables/settings.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/settings +--- # system.settings {#system-tables-system-settings} 包含当前用户会话设置的相关信息。 diff --git a/docs/zh/operations/system-tables/settings_profile_elements.md b/docs/zh/operations/system-tables/settings_profile_elements.md index 2f5eb5b3044..233562c09e9 100644 --- a/docs/zh/operations/system-tables/settings_profile_elements.md +++ b/docs/zh/operations/system-tables/settings_profile_elements.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/settings_profile_elements +--- # system.settings_profile_elements {#system_tables-settings_profile_elements} 描述settings配置文件的内容: diff --git a/docs/zh/operations/system-tables/settings_profiles.md b/docs/zh/operations/system-tables/settings_profiles.md index 46102f9c2a6..b3ac45e31b6 100644 --- a/docs/zh/operations/system-tables/settings_profiles.md +++ b/docs/zh/operations/system-tables/settings_profiles.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/settings_profiles +--- # system.settings_profiles {#system_tables-settings_profiles} 包含 Setting 配置文件中指定的属性. diff --git a/docs/zh/operations/system-tables/stack_trace.md b/docs/zh/operations/system-tables/stack_trace.md index 8e46efd17ab..b17cd7e1540 100644 --- a/docs/zh/operations/system-tables/stack_trace.md +++ b/docs/zh/operations/system-tables/stack_trace.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/stack_trace +--- # system.stack_trace {#system-tables_stack_trace} 包含所有服务器线程的堆栈跟踪. 允许开发人员对服务器状态进行自省. diff --git a/docs/zh/operations/system-tables/storage_policies.md b/docs/zh/operations/system-tables/storage_policies.md index e2531649493..e40c54e6e8b 100644 --- a/docs/zh/operations/system-tables/storage_policies.md +++ b/docs/zh/operations/system-tables/storage_policies.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/storage_policies +--- # system.storage_policies {#system_tables-storage_policies} 包含有关 [服务器配置](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes_configure) 中定义的存储策略和卷信息。 diff --git a/docs/zh/operations/system-tables/table_engines.md b/docs/zh/operations/system-tables/table_engines.md index 4e107868aa5..c4c3958ce80 100644 --- a/docs/zh/operations/system-tables/table_engines.md +++ b/docs/zh/operations/system-tables/table_engines.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/table_engines machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/tables.md b/docs/zh/operations/system-tables/tables.md index 03ea9f93d26..e81361874fc 100644 --- a/docs/zh/operations/system-tables/tables.md +++ b/docs/zh/operations/system-tables/tables.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/tables +--- # system.tables {#system-tables} 包含服务器知道的每个表的元数据。 [分离的](../../sql-reference/statements/detach.md)表不在 `system.tables` 显示。 diff --git a/docs/zh/operations/system-tables/text_log.md b/docs/zh/operations/system-tables/text_log.md index 3c56c8e42e1..ebd2e634218 100644 --- a/docs/zh/operations/system-tables/text_log.md +++ b/docs/zh/operations/system-tables/text_log.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/text_log machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- 
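Once every page has been migrated, any `docs/zh` page without a `slug` is a regression. A minimal presence check in the same spirit (hypothetical; the project's actual CI hooks are not shown in this diff):

```python
import sys
from pathlib import Path

missing = []
for page in sorted(Path("docs/zh").rglob("*.md")):
    lines = page.read_text(encoding="utf-8").splitlines()
    keys = set()
    if lines and lines[0] == "---":
        for line in lines[1:10]:  # front matter here is only a few keys long
            if line == "---":
                break
            keys.add(line.split(":", 1)[0].strip())
    if "slug" not in keys:
        missing.append(str(page))

if missing:
    sys.exit("pages still missing a slug:\n" + "\n".join(missing))
```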
diff --git a/docs/zh/operations/system-tables/time_zones.md b/docs/zh/operations/system-tables/time_zones.md index d01725edf5b..8e6bb3e8058 100644 --- a/docs/zh/operations/system-tables/time_zones.md +++ b/docs/zh/operations/system-tables/time_zones.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/time_zones +--- # system.time_zones {#system-time_zones} 包含 ClickHouse 服务器支持的时区列表. 此时区列表可能因 ClickHouse 的版本而异 diff --git a/docs/zh/operations/system-tables/trace_log.md b/docs/zh/operations/system-tables/trace_log.md index 6c9404d6e82..479a8b78762 100644 --- a/docs/zh/operations/system-tables/trace_log.md +++ b/docs/zh/operations/system-tables/trace_log.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/trace_log machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/users.md b/docs/zh/operations/system-tables/users.md index 521c54de801..f53175c8759 100644 --- a/docs/zh/operations/system-tables/users.md +++ b/docs/zh/operations/system-tables/users.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/users +--- # system.users {#system_tables-users} 包含服务器上配置的[用户账号](../../operations/access-rights.md#user-account-management)的列表. diff --git a/docs/zh/operations/system-tables/zookeeper.md b/docs/zh/operations/system-tables/zookeeper.md index 70eedc7294e..83b410ece03 100644 --- a/docs/zh/operations/system-tables/zookeeper.md +++ b/docs/zh/operations/system-tables/zookeeper.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/system-tables/zookeeper machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/operations/system-tables/zookeeper_log.md b/docs/zh/operations/system-tables/zookeeper_log.md index 678e2b8a93b..59dcdaecdc1 100644 --- a/docs/zh/operations/system-tables/zookeeper_log.md +++ b/docs/zh/operations/system-tables/zookeeper_log.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/system-tables/zookeeper_log +--- # system.zookeeper_log {#system-zookeeper_log} 此表包含有关对 ZooKeeper 服务器的请求及其响应的参数的信息. 
diff --git a/docs/zh/operations/tips.md b/docs/zh/operations/tips.md index 63a74abd7d8..de63c7a687a 100644 --- a/docs/zh/operations/tips.md +++ b/docs/zh/operations/tips.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/tips +--- # 使用建议 {#usage-recommendations} ## CPU频率调节器 {#cpu-scaling-governor} diff --git a/docs/zh/operations/troubleshooting.md b/docs/zh/operations/troubleshooting.md index 3252b59ee8f..47908f1b75c 100644 --- a/docs/zh/operations/troubleshooting.md +++ b/docs/zh/operations/troubleshooting.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/troubleshooting sidebar_position: 46 sidebar_label: "常见问题" --- diff --git a/docs/zh/operations/update.md b/docs/zh/operations/update.md index b837acf5979..428fe7f5cba 100644 --- a/docs/zh/operations/update.md +++ b/docs/zh/operations/update.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/update sidebar_position: 47 sidebar_label: "更新" --- diff --git a/docs/zh/operations/utilities/clickhouse-benchmark.md b/docs/zh/operations/utilities/clickhouse-benchmark.md index 3123582c150..a0e881f652b 100644 --- a/docs/zh/operations/utilities/clickhouse-benchmark.md +++ b/docs/zh/operations/utilities/clickhouse-benchmark.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/utilities/clickhouse-benchmark sidebar_position: 61 sidebar_label: "性能测试" --- diff --git a/docs/zh/operations/utilities/clickhouse-copier.md b/docs/zh/operations/utilities/clickhouse-copier.md index f6a4f11f81c..4662824e7f0 100644 --- a/docs/zh/operations/utilities/clickhouse-copier.md +++ b/docs/zh/operations/utilities/clickhouse-copier.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/utilities/clickhouse-copier +--- # clickhouse-copier {#clickhouse-copier} 将数据从一个群集中的表复制到另一个(或相同)群集中的表。 diff --git a/docs/zh/operations/utilities/clickhouse-local.md b/docs/zh/operations/utilities/clickhouse-local.md index f6b25a1faf0..dec37e53926 100644 --- a/docs/zh/operations/utilities/clickhouse-local.md +++ b/docs/zh/operations/utilities/clickhouse-local.md @@ -1,4 +1,5 @@ --- +slug: /zh/operations/utilities/clickhouse-local sidebar_position: 60 sidebar_label: clickhouse-local --- diff --git a/docs/zh/operations/utilities/index.md b/docs/zh/operations/utilities/index.md index ab65922c35c..fcc9a12ab4b 100644 --- a/docs/zh/operations/utilities/index.md +++ b/docs/zh/operations/utilities/index.md @@ -1,3 +1,6 @@ +--- +slug: /zh/operations/utilities/ +--- # 实用工具 {#clickhouse-utility} - [本地查询](clickhouse-local.md) — 在不停止ClickHouse服务的情况下,对数据执行查询操作(类似于 `awk` 命令)。 diff --git a/docs/zh/sql-reference/aggregate-functions/combinators.md b/docs/zh/sql-reference/aggregate-functions/combinators.md index f2caeca4405..68fd240aafa 100644 --- a/docs/zh/sql-reference/aggregate-functions/combinators.md +++ b/docs/zh/sql-reference/aggregate-functions/combinators.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/combinators sidebar_position: 37 sidebar_label: 聚合函数组合器 --- diff --git a/docs/zh/sql-reference/aggregate-functions/index.md b/docs/zh/sql-reference/aggregate-functions/index.md index 7d613c69c2b..959bd65445a 100644 --- a/docs/zh/sql-reference/aggregate-functions/index.md +++ b/docs/zh/sql-reference/aggregate-functions/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/ sidebar_label: 聚合函数 sidebar_position: 33 --- diff --git a/docs/zh/sql-reference/aggregate-functions/parametric-functions.md b/docs/zh/sql-reference/aggregate-functions/parametric-functions.md index a8a79593d10..6673e58c32c 100644 --- a/docs/zh/sql-reference/aggregate-functions/parametric-functions.md +++ 
b/docs/zh/sql-reference/aggregate-functions/parametric-functions.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/parametric-functions sidebar_position: 38 sidebar_label: 参数聚合函数 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/any.md b/docs/zh/sql-reference/aggregate-functions/reference/any.md index 45dac247a02..cdcc0341aba 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/any.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/any.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/any sidebar_position: 6 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/anyheavy.md b/docs/zh/sql-reference/aggregate-functions/reference/anyheavy.md index f4785cdaeab..0d6390f54a1 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/anyheavy.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/anyheavy.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/anyheavy sidebar_position: 103 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/anylast.md b/docs/zh/sql-reference/aggregate-functions/reference/anylast.md index bd69d93d179..14579a7065c 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/anylast.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/anylast.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/anylast sidebar_position: 104 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/argmax.md b/docs/zh/sql-reference/aggregate-functions/reference/argmax.md index f9ad61fb920..6ae7155ca24 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/argmax.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/argmax.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/argmax sidebar_position: 106 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/argmin.md b/docs/zh/sql-reference/aggregate-functions/reference/argmin.md index 3d331cb1725..cb21a13021b 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/argmin.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/argmin.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/argmin sidebar_position: 105 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/avg.md b/docs/zh/sql-reference/aggregate-functions/reference/avg.md index 2c5f7c3f827..21f324eb6d3 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/avg.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/avg.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/avg sidebar_position: 5 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/avgweighted.md b/docs/zh/sql-reference/aggregate-functions/reference/avgweighted.md index 9fa56d1726c..5dc8d06df4a 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/avgweighted.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/avgweighted.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/avgweighted sidebar_position: 107 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md b/docs/zh/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md index 942933bb35d..b7ac0438c5c 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md @@ 
-1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/categoricalinformationvalue sidebar_position: 250 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/corr.md b/docs/zh/sql-reference/aggregate-functions/reference/corr.md index 796da3dd72a..01a89e428ab 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/corr.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/corr.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/corr sidebar_position: 107 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/count.md b/docs/zh/sql-reference/aggregate-functions/reference/count.md index 96e20323b09..3ed4fdd51bc 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/count.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/count.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/count sidebar_position: 1 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/covarpop.md b/docs/zh/sql-reference/aggregate-functions/reference/covarpop.md index 7137861d2e6..93bfee15684 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/covarpop.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/covarpop.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/covarpop sidebar_position: 36 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/covarsamp.md b/docs/zh/sql-reference/aggregate-functions/reference/covarsamp.md index ab0fa218c8f..7c8565211b1 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/covarsamp.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/covarsamp.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/covarsamp sidebar_position: 37 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/deltasum.md b/docs/zh/sql-reference/aggregate-functions/reference/deltasum.md index 220dac2b500..ce0d9c23685 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/deltasum.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/deltasum.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/deltasum sidebar_position: 141 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/grouparray.md b/docs/zh/sql-reference/aggregate-functions/reference/grouparray.md index a9948b2c372..50bc17047ac 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/grouparray.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/grouparray.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/grouparray sidebar_position: 110 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/grouparrayinsertat.md b/docs/zh/sql-reference/aggregate-functions/reference/grouparrayinsertat.md index 7f6fa55280f..8431b5a1110 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/grouparrayinsertat.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/grouparrayinsertat.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/grouparrayinsertat sidebar_position: 112 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/grouparraymovingavg.md b/docs/zh/sql-reference/aggregate-functions/reference/grouparraymovingavg.md index 4ede3a2fffd..bfbb832d0a2 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/grouparraymovingavg.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/grouparraymovingavg.md @@ -1,4 +1,5 @@ --- 
+slug: /zh/sql-reference/aggregate-functions/reference/grouparraymovingavg sidebar_position: 114 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/grouparraymovingsum.md b/docs/zh/sql-reference/aggregate-functions/reference/grouparraymovingsum.md index 9cea9d98bd9..c594e6b7427 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/grouparraymovingsum.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/grouparraymovingsum.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/grouparraymovingsum sidebar_position: 113 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/grouparraysample.md b/docs/zh/sql-reference/aggregate-functions/reference/grouparraysample.md index 069fa867527..87470d9f4d3 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/grouparraysample.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/grouparraysample.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/grouparraysample sidebar_position: 114 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/groupbitand.md b/docs/zh/sql-reference/aggregate-functions/reference/groupbitand.md index fb8a76d019d..9275413a45c 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/groupbitand.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/groupbitand.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/groupbitand sidebar_position: 125 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/groupbitmap.md b/docs/zh/sql-reference/aggregate-functions/reference/groupbitmap.md index e835cdbc74d..a20069faa4e 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/groupbitmap.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/groupbitmap.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/groupbitmap sidebar_position: 128 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapand.md b/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapand.md index 879b8454716..9b7e171747b 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapand.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapand.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/groupbitmapand sidebar_position: 129 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapor.md b/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapor.md index d1e8e2a34fc..f1ed2454a84 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapor.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapor.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/groupbitmapor sidebar_position: 130 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapxor.md b/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapxor.md index 6348cc56578..4524f11354d 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapxor.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/groupbitmapxor.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/groupbitmapxor sidebar_position: 131 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/groupbitor.md b/docs/zh/sql-reference/aggregate-functions/reference/groupbitor.md index 41dd33d6dfc..69e3954780d 100644 --- 
a/docs/zh/sql-reference/aggregate-functions/reference/groupbitor.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/groupbitor.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/groupbitor sidebar_position: 126 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/groupbitxor.md b/docs/zh/sql-reference/aggregate-functions/reference/groupbitxor.md index 463bec3a219..571548cba48 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/groupbitxor.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/groupbitxor.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/groupbitxor sidebar_position: 127 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/groupuniqarray.md b/docs/zh/sql-reference/aggregate-functions/reference/groupuniqarray.md index 5c9a60eb870..5e670193afb 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/groupuniqarray.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/groupuniqarray.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/groupuniqarray sidebar_position: 111 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/index.md b/docs/zh/sql-reference/aggregate-functions/reference/index.md index 0158b2cfe4c..b02b1192943 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/index.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/ sidebar_label: Reference sidebar_position: 36 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/initializeAggregation.md b/docs/zh/sql-reference/aggregate-functions/reference/initializeAggregation.md index 9e9e68426e8..ea34e2bc7a3 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/initializeAggregation.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/initializeAggregation.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/initializeAggregation sidebar_position: 150 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/kurtpop.md b/docs/zh/sql-reference/aggregate-functions/reference/kurtpop.md index e5de630e44a..666fc01c826 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/kurtpop.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/kurtpop.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/kurtpop sidebar_position: 153 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/kurtsamp.md b/docs/zh/sql-reference/aggregate-functions/reference/kurtsamp.md index a5755f431af..c74066d7905 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/kurtsamp.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/kurtsamp.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/kurtsamp sidebar_position: 154 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/mannwhitneyutest.md b/docs/zh/sql-reference/aggregate-functions/reference/mannwhitneyutest.md index 966c8175889..46c877ef17d 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/mannwhitneyutest.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/mannwhitneyutest.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/mannwhitneyutest sidebar_position: 310 sidebar_label: mannWhitneyUTest --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/max.md 
b/docs/zh/sql-reference/aggregate-functions/reference/max.md index 6ccbf75f690..783a74c9597 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/max.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/max.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/max sidebar_position: 3 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/maxmap.md b/docs/zh/sql-reference/aggregate-functions/reference/maxmap.md index 3daaf98380e..20c7375f640 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/maxmap.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/maxmap.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/maxmap sidebar_position: 143 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/median.md b/docs/zh/sql-reference/aggregate-functions/reference/median.md index 83879f6cb34..a66c3e0f7b3 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/median.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/median.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/aggregate-functions/reference/median +--- # median {#median} `median*` 函数是 `quantile*` 函数的别名。它们计算数字数据样本的中位数。 diff --git a/docs/zh/sql-reference/aggregate-functions/reference/min.md b/docs/zh/sql-reference/aggregate-functions/reference/min.md index 6fd68848b02..14b142882d9 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/min.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/min.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/min sidebar_position: 2 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/minmap.md b/docs/zh/sql-reference/aggregate-functions/reference/minmap.md index c2bad84ce04..2e1af1c555f 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/minmap.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/minmap.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/minmap sidebar_position: 142 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/quantile.md b/docs/zh/sql-reference/aggregate-functions/reference/quantile.md index 9b4c3062a19..11e0448da7f 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/quantile.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/quantile.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/quantile sidebar_position: 200 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/quantiledeterministic.md b/docs/zh/sql-reference/aggregate-functions/reference/quantiledeterministic.md index 9c6ec868a24..2d02c7daa4a 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/quantiledeterministic.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/quantiledeterministic.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/quantiledeterministic sidebar_position: 206 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/zh/sql-reference/aggregate-functions/reference/quantileexact.md index e872fa2cab6..24bc2583774 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/quantileexact.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/quantileexact sidebar_position: 202 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/quantileexactweighted.md 
b/docs/zh/sql-reference/aggregate-functions/reference/quantileexactweighted.md index 7980777f735..1366c1059ca 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/quantileexactweighted.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/quantileexactweighted.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/quantileexactweighted sidebar_position: 203 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/quantiles.md b/docs/zh/sql-reference/aggregate-functions/reference/quantiles.md index 575ff00887e..4dce65af1ed 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/quantiles.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/quantiles sidebar_position: 201 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/zh/sql-reference/aggregate-functions/reference/quantiletdigest.md index 1f3823d51a6..7b1c9b600ae 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/quantiletdigest.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/quantiletdigest.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/quantiletdigest sidebar_position: 207 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/zh/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md index 5e0313af220..e9933f557b6 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/quantiletdigestweighted sidebar_position: 208 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/quantiletiming.md b/docs/zh/sql-reference/aggregate-functions/reference/quantiletiming.md index 33bde24122a..5e14ce6a11c 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/quantiletiming.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/quantiletiming.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/quantiletiming sidebar_position: 204 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/quantiletimingweighted.md b/docs/zh/sql-reference/aggregate-functions/reference/quantiletimingweighted.md index 1f2d0315edd..2c28583343a 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/quantiletimingweighted.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/quantiletimingweighted.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/quantiletimingweighted sidebar_position: 205 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/rankCorr.md b/docs/zh/sql-reference/aggregate-functions/reference/rankCorr.md index 716a9fb2440..a29b76fc07a 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/rankCorr.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/rankCorr.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/aggregate-functions/reference/rankCorr +--- ## rankCorr {#agg_function-rankcorr} 计算等级相关系数。 @@ -50,4 +53,4 @@ SELECT roundBankers(rankCorr(exp(number), sin(number)), 3) FROM numbers(100); ``` **参见** -- 斯皮尔曼等级相关系数[Spearman's rank correlation coefficient](https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient) \ No newline at end of file +- 斯皮尔曼等级相关系数[Spearman's rank correlation 
coefficient](https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient) diff --git a/docs/zh/sql-reference/aggregate-functions/reference/simplelinearregression.md b/docs/zh/sql-reference/aggregate-functions/reference/simplelinearregression.md index a193ef3ca73..a7b36539b09 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/simplelinearregression.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/simplelinearregression.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/simplelinearregression sidebar_position: 220 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/skewpop.md b/docs/zh/sql-reference/aggregate-functions/reference/skewpop.md index dc0ec27fe4b..2c5afade477 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/skewpop.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/skewpop.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/skewpop sidebar_position: 150 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/skewsamp.md b/docs/zh/sql-reference/aggregate-functions/reference/skewsamp.md index 177bd9cbdd4..e0358b65707 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/skewsamp.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/skewsamp.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/skewsamp sidebar_position: 151 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/stddevpop.md b/docs/zh/sql-reference/aggregate-functions/reference/stddevpop.md index c7e91fe7434..ea82e21e46f 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/stddevpop.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/stddevpop.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/stddevpop sidebar_position: 30 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/stddevsamp.md b/docs/zh/sql-reference/aggregate-functions/reference/stddevsamp.md index 75b8f23cf04..efeafb71072 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/stddevsamp.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/stddevsamp.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/stddevsamp sidebar_position: 31 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/stochasticlinearregression.md b/docs/zh/sql-reference/aggregate-functions/reference/stochasticlinearregression.md index 7b36d652e76..1ffcaf2b2be 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/stochasticlinearregression.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/stochasticlinearregression.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/stochasticlinearregression sidebar_position: 221 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md b/docs/zh/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md index 1ab7fd28fc8..e0b865366b7 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/stochasticlogisticregression sidebar_position: 222 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/studentttest.md b/docs/zh/sql-reference/aggregate-functions/reference/studentttest.md index 6591db6f402..582176b806b 
100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/studentttest.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/studentttest.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/studentttest sidebar_position: 300 sidebar_label: studentTTest --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/sum.md b/docs/zh/sql-reference/aggregate-functions/reference/sum.md index c6b94d2ef88..d4e9436d204 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/sum.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/sum.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/sum sidebar_position: 4 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/summap.md b/docs/zh/sql-reference/aggregate-functions/reference/summap.md index a2bb381741b..106a3eae115 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/summap.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/summap.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/summap sidebar_position: 141 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/sumwithoverflow.md b/docs/zh/sql-reference/aggregate-functions/reference/sumwithoverflow.md index 78e749a72df..d33b8befe3d 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/sumwithoverflow.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/sumwithoverflow.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/sumwithoverflow sidebar_position: 140 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/topk.md b/docs/zh/sql-reference/aggregate-functions/reference/topk.md index ab24d728781..9dc4a11a939 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/topk.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/topk.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/topk sidebar_position: 108 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/topkweighted.md b/docs/zh/sql-reference/aggregate-functions/reference/topkweighted.md index c3558274b90..d6ed34f792e 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/topkweighted.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/topkweighted.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/topkweighted sidebar_position: 109 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/uniq.md b/docs/zh/sql-reference/aggregate-functions/reference/uniq.md index 66e6b1b3c0a..c75a71611a4 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/uniq.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/uniq.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/uniq sidebar_position: 190 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/uniqcombined.md b/docs/zh/sql-reference/aggregate-functions/reference/uniqcombined.md index b1888466f95..edc790ec00f 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/uniqcombined.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/uniqcombined.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/uniqcombined sidebar_position: 192 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/uniqcombined64.md b/docs/zh/sql-reference/aggregate-functions/reference/uniqcombined64.md index f8995ac2aa9..46b62d08a3b 100644 --- 
a/docs/zh/sql-reference/aggregate-functions/reference/uniqcombined64.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/uniqcombined64.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/uniqcombined64 sidebar_position: 193 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/uniqexact.md b/docs/zh/sql-reference/aggregate-functions/reference/uniqexact.md index b7f1de67a54..2b3366637b3 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/uniqexact.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/uniqexact.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/uniqexact sidebar_position: 191 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/zh/sql-reference/aggregate-functions/reference/uniqhll12.md index b2cfe875bb5..3bfde2590a9 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/uniqhll12.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/uniqhll12.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/uniqhll12 sidebar_position: 194 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/varpop.md b/docs/zh/sql-reference/aggregate-functions/reference/varpop.md index 5258fddadbe..eb17955210b 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/varpop.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/varpop.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/varpop sidebar_position: 32 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/varsamp.md b/docs/zh/sql-reference/aggregate-functions/reference/varsamp.md index faa20a3997a..51cd4ee3310 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/varsamp.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/varsamp.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/varsamp sidebar_position: 33 --- diff --git a/docs/zh/sql-reference/aggregate-functions/reference/welchttest.md b/docs/zh/sql-reference/aggregate-functions/reference/welchttest.md index da3d248d586..2093e30d44e 100644 --- a/docs/zh/sql-reference/aggregate-functions/reference/welchttest.md +++ b/docs/zh/sql-reference/aggregate-functions/reference/welchttest.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/aggregate-functions/reference/welchttest sidebar_position: 301 sidebar_label: welchTTest --- diff --git a/docs/zh/sql-reference/ansi.md b/docs/zh/sql-reference/ansi.md index 06aed9e00f7..9cf335f89ef 100644 --- a/docs/zh/sql-reference/ansi.md +++ b/docs/zh/sql-reference/ansi.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/ansi sidebar_position: 40 sidebar_label: "ANSI\u517C\u5BB9\u6027" --- diff --git a/docs/zh/sql-reference/data-types/aggregatefunction.md b/docs/zh/sql-reference/data-types/aggregatefunction.md index e6e07b02e11..279de7effe3 100644 --- a/docs/zh/sql-reference/data-types/aggregatefunction.md +++ b/docs/zh/sql-reference/data-types/aggregatefunction.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/aggregatefunction +--- # AggregateFunction(name, types_of_arguments…) {#data-type-aggregatefunction} 聚合函数的中间状态,可以通过聚合函数名称加`-State`后缀的形式得到它。与此同时,当您需要访问该类型的最终状态数据时,您需要以相同的聚合函数名加`-Merge`后缀的形式来得到最终状态数据。 diff --git a/docs/zh/sql-reference/data-types/array.md b/docs/zh/sql-reference/data-types/array.md index 41eeb72c045..e2f18a42de8 100644 --- a/docs/zh/sql-reference/data-types/array.md +++ b/docs/zh/sql-reference/data-types/array.md @@ -1,3 +1,6 @@ +--- +slug: 
/zh/sql-reference/data-types/array +--- # 阵列(T) {#data-type-array} 由 `T` 类型元素组成的数组。 diff --git a/docs/zh/sql-reference/data-types/boolean.md b/docs/zh/sql-reference/data-types/boolean.md index bd0a1ee7312..0506ee0f04b 100644 --- a/docs/zh/sql-reference/data-types/boolean.md +++ b/docs/zh/sql-reference/data-types/boolean.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/boolean +--- # 布尔值 {#boolean-values} 从 https://github.com/ClickHouse/ClickHouse/commit/4076ae77b46794e73594a9f400200088ed1e7a6e 之后,有单独的类型来存储布尔值。 diff --git a/docs/zh/sql-reference/data-types/date.md b/docs/zh/sql-reference/data-types/date.md index ab5d3acae1b..9b1acdbe939 100644 --- a/docs/zh/sql-reference/data-types/date.md +++ b/docs/zh/sql-reference/data-types/date.md @@ -1,6 +1,9 @@ +--- +slug: /zh/sql-reference/data-types/date +--- # 日期 {#date} -日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2106年,但最终完全支持的年份为2105)。最小值输出为1970-01-01。 +日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2149年,但最终完全支持的年份为2148)。最小值输出为1970-01-01。 值的范围: \[1970-01-01, 2149-06-06\]。 diff --git a/docs/zh/sql-reference/data-types/datetime.md b/docs/zh/sql-reference/data-types/datetime.md index b6c8c3d2d35..35d3b60e901 100644 --- a/docs/zh/sql-reference/data-types/datetime.md +++ b/docs/zh/sql-reference/data-types/datetime.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/datetime +--- # 日期时间 {#data_type-datetime} 时间戳类型。用四个字节(无符号的)存储 Unix 时间戳)。允许存储与日期类型相同的范围内的值。最小值为 1970-01-01 00:00:00。时间戳类型值精确到秒(没有闰秒)。 diff --git a/docs/zh/sql-reference/data-types/datetime64.md b/docs/zh/sql-reference/data-types/datetime64.md index da637929180..ee2d7a6f258 100644 --- a/docs/zh/sql-reference/data-types/datetime64.md +++ b/docs/zh/sql-reference/data-types/datetime64.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/data-types/datetime64 machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 49 diff --git a/docs/zh/sql-reference/data-types/decimal.md b/docs/zh/sql-reference/data-types/decimal.md index b0a55de1685..be823205f9c 100644 --- a/docs/zh/sql-reference/data-types/decimal.md +++ b/docs/zh/sql-reference/data-types/decimal.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/decimal +--- # Decimal(P,S),Decimal32(S),Decimal64(S),Decimal128(S) {#decimalp-s-decimal32s-decimal64s-decimal128s} 有符号的定点数,可在加、减和乘法运算过程中保持精度。对于除法,最低有效数字会被丢弃(不舍入)。 diff --git a/docs/zh/sql-reference/data-types/domains/index.md b/docs/zh/sql-reference/data-types/domains/index.md index 8f0e6920fc9..98ef5eddeba 100644 --- a/docs/zh/sql-reference/data-types/domains/index.md +++ b/docs/zh/sql-reference/data-types/domains/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/data-types/domains/ sidebar_label: "域" sidebar_position: 56 --- diff --git a/docs/zh/sql-reference/data-types/domains/ipv4.md b/docs/zh/sql-reference/data-types/domains/ipv4.md index 2e027baf9b7..22f00b0f5cf 100644 --- a/docs/zh/sql-reference/data-types/domains/ipv4.md +++ b/docs/zh/sql-reference/data-types/domains/ipv4.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/domains/ipv4 +--- ## IPv4 {#ipv4} `IPv4`是与`UInt32`类型保持二进制兼容的Domain类型,其用于存储IPv4地址的值。它提供了更为紧凑的二进制存储的同时支持识别可读性更加友好的输入输出格式。 diff --git a/docs/zh/sql-reference/data-types/domains/ipv6.md b/docs/zh/sql-reference/data-types/domains/ipv6.md index 6545aa61bc2..9a95f39cfda 100644 --- a/docs/zh/sql-reference/data-types/domains/ipv6.md +++ b/docs/zh/sql-reference/data-types/domains/ipv6.md @@ -1,3 +1,6 @@ +--- +slug: 
/zh/sql-reference/data-types/domains/ipv6 +--- ## IPv6 {#ipv6} `IPv6`是与`FixedString(16)`类型保持二进制兼容的Domain类型,其用于存储IPv6地址的值。它提供了更为紧凑的二进制存储的同时支持识别可读性更加友好的输入输出格式。 diff --git a/docs/zh/sql-reference/data-types/enum.md b/docs/zh/sql-reference/data-types/enum.md index a362b60a81d..0cf8a02d76b 100644 --- a/docs/zh/sql-reference/data-types/enum.md +++ b/docs/zh/sql-reference/data-types/enum.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/enum +--- # Enum8,Enum16 {#enum8-enum16} 包括 `Enum8` 和 `Enum16` 类型。`Enum` 保存 `'string'= integer` 的对应关系。在 ClickHouse 中,尽管用户使用的是字符串常量,但所有含有 `Enum` 数据类型的操作都是按照包含整数的值来执行。这在性能方面比使用 `String` 数据类型更有效。 diff --git a/docs/zh/sql-reference/data-types/fixedstring.md b/docs/zh/sql-reference/data-types/fixedstring.md index 4e881b790b4..90ebb0fe127 100644 --- a/docs/zh/sql-reference/data-types/fixedstring.md +++ b/docs/zh/sql-reference/data-types/fixedstring.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/fixedstring +--- # 固定字符串 {#fixedstring} 固定长度 N 的字符串(N 必须是严格的正自然数)。 diff --git a/docs/zh/sql-reference/data-types/float.md b/docs/zh/sql-reference/data-types/float.md index 0fe6d2cf000..9184b6fcdcd 100644 --- a/docs/zh/sql-reference/data-types/float.md +++ b/docs/zh/sql-reference/data-types/float.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/float +--- # Float32,Float64 {#float32-float64} [浮点数](https://en.wikipedia.org/wiki/IEEE_754)。 diff --git a/docs/zh/sql-reference/data-types/index.md b/docs/zh/sql-reference/data-types/index.md index 6c2b2af58d7..0ff545699d7 100644 --- a/docs/zh/sql-reference/data-types/index.md +++ b/docs/zh/sql-reference/data-types/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/data-types/ sidebar_label: 数据类型 sidebar_position: 37 --- diff --git a/docs/zh/sql-reference/data-types/int-uint.md b/docs/zh/sql-reference/data-types/int-uint.md index ce7a2f23484..f80cb7cd7c9 100644 --- a/docs/zh/sql-reference/data-types/int-uint.md +++ b/docs/zh/sql-reference/data-types/int-uint.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/data-types/int-uint sidebar_position: 40 sidebar_label: UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256 --- diff --git a/docs/zh/sql-reference/data-types/lowcardinality.md b/docs/zh/sql-reference/data-types/lowcardinality.md index 8192f9147b0..e089a7f9d41 100644 --- a/docs/zh/sql-reference/data-types/lowcardinality.md +++ b/docs/zh/sql-reference/data-types/lowcardinality.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/data-types/lowcardinality sidebar_position: 51 sidebar_label: 低基数类型 --- diff --git a/docs/zh/sql-reference/data-types/map.md b/docs/zh/sql-reference/data-types/map.md index 7828d182f34..89170530375 100644 --- a/docs/zh/sql-reference/data-types/map.md +++ b/docs/zh/sql-reference/data-types/map.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/data-types/map sidebar_position: 65 sidebar_label: Map(key, value) --- diff --git a/docs/zh/sql-reference/data-types/nested-data-structures/index.md b/docs/zh/sql-reference/data-types/nested-data-structures/index.md index 3914064674e..af9d264b87f 100644 --- a/docs/zh/sql-reference/data-types/nested-data-structures/index.md +++ b/docs/zh/sql-reference/data-types/nested-data-structures/index.md @@ -1 +1,4 @@ +--- +slug: /zh/sql-reference/data-types/nested-data-structures/ +--- # 嵌套数据结构 {#qian-tao-shu-ju-jie-gou} diff --git a/docs/zh/sql-reference/data-types/nested-data-structures/nested.md b/docs/zh/sql-reference/data-types/nested-data-structures/nested.md index d2fd1e3a630..5ef8256b483 100644 --- 
a/docs/zh/sql-reference/data-types/nested-data-structures/nested.md +++ b/docs/zh/sql-reference/data-types/nested-data-structures/nested.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/nested-data-structures/nested +--- # Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} 嵌套数据结构类似于嵌套表。嵌套数据结构的参数(列名和类型)与 CREATE 查询类似。每个表可以包含任意多行嵌套数据结构。 diff --git a/docs/zh/sql-reference/data-types/nullable.md b/docs/zh/sql-reference/data-types/nullable.md index 90fb302b713..3bc255a9e46 100644 --- a/docs/zh/sql-reference/data-types/nullable.md +++ b/docs/zh/sql-reference/data-types/nullable.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/nullable +--- # 可为空(类型名称) {#data_type-nullable} 允许用特殊标记 ([NULL](../../sql-reference/data-types/nullable.md)) 表示«缺失值»,可以与 `TypeName` 的正常值存放一起。例如,`Nullable(Int8)` 类型的列可以存储 `Int8` 类型值,而没有值的行将存储 `NULL`。 diff --git a/docs/zh/sql-reference/data-types/simpleaggregatefunction.md b/docs/zh/sql-reference/data-types/simpleaggregatefunction.md index fc47f2c258a..83e33b912e2 100644 --- a/docs/zh/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/zh/sql-reference/data-types/simpleaggregatefunction.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/simpleaggregatefunction +--- # SimpleAggregateFunction {#data-type-simpleaggregatefunction} `SimpleAggregateFunction(name, types_of_arguments…)` 数据类型存储聚合函数的当前值, 并不像 [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) 那样存储其全部状态。这种优化可以应用于具有以下属性函数: 将函数 `f` 应用于行集合 `S1 UNION ALL S2` 的结果,可以通过将 `f` 分别应用于行集合的部分, 然后再将 `f` 应用于结果来获得: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`。 这个属性保证了部分聚合结果足以计算出合并的结果,所以我们不必存储和处理任何额外的数据。 diff --git a/docs/zh/sql-reference/data-types/special-data-types/expression.md b/docs/zh/sql-reference/data-types/special-data-types/expression.md index e0618aa2aaa..ca1066cb31e 100644 --- a/docs/zh/sql-reference/data-types/special-data-types/expression.md +++ b/docs/zh/sql-reference/data-types/special-data-types/expression.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/special-data-types/expression +--- # 表达式 {#expression} 用于表示高阶函数中的Lambd表达式。 diff --git a/docs/zh/sql-reference/data-types/special-data-types/index.md b/docs/zh/sql-reference/data-types/special-data-types/index.md index 1c104f51d43..e980e24c58a 100644 --- a/docs/zh/sql-reference/data-types/special-data-types/index.md +++ b/docs/zh/sql-reference/data-types/special-data-types/index.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/special-data-types/ +--- # 特殊数据类型 {#special-data-types} 特殊数据类型的值既不能存在表中也不能在结果中输出,但可用于查询的中间结果。 diff --git a/docs/zh/sql-reference/data-types/special-data-types/interval.md b/docs/zh/sql-reference/data-types/special-data-types/interval.md index 3eb3efda399..e05869b2df8 100644 --- a/docs/zh/sql-reference/data-types/special-data-types/interval.md +++ b/docs/zh/sql-reference/data-types/special-data-types/interval.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/data-types/special-data-types/interval machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 61 diff --git a/docs/zh/sql-reference/data-types/special-data-types/nothing.md b/docs/zh/sql-reference/data-types/special-data-types/nothing.md index 64f656185c1..2b10934f566 100644 --- a/docs/zh/sql-reference/data-types/special-data-types/nothing.md +++ b/docs/zh/sql-reference/data-types/special-data-types/nothing.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/special-data-types/nothing +--- # 没什么 {#nothing} 此数据类型的唯一目的是表示不是期望值的情况。 
所以不能创建一个 `Nothing` 类型的值。 diff --git a/docs/zh/sql-reference/data-types/special-data-types/set.md b/docs/zh/sql-reference/data-types/special-data-types/set.md index b66f25cef18..eb3c015a210 100644 --- a/docs/zh/sql-reference/data-types/special-data-types/set.md +++ b/docs/zh/sql-reference/data-types/special-data-types/set.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/special-data-types/set +--- # 集合 {#set} 可以用在 IN 表达式的右半部分。 diff --git a/docs/zh/sql-reference/data-types/string.md b/docs/zh/sql-reference/data-types/string.md index ab0fbc450e7..54012592233 100644 --- a/docs/zh/sql-reference/data-types/string.md +++ b/docs/zh/sql-reference/data-types/string.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/string +--- # 字符串 {#string} 字符串可以任意长度的。它可以包含任意的字节集,包含空字节。因此,字符串类型可以代替其他 DBMSs 中的 VARCHAR、BLOB、CLOB 等类型。 diff --git a/docs/zh/sql-reference/data-types/tuple.md b/docs/zh/sql-reference/data-types/tuple.md index 14e13e61d32..e991fa7145a 100644 --- a/docs/zh/sql-reference/data-types/tuple.md +++ b/docs/zh/sql-reference/data-types/tuple.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/data-types/tuple +--- # Tuple(T1, T2, …) {#tuplet1-t2} 元组,其中每个元素都有单独的 [类型](index.md#data_types)。 diff --git a/docs/zh/sql-reference/data-types/uuid.md b/docs/zh/sql-reference/data-types/uuid.md index f16cfdfc6b3..86d272b5cbf 100644 --- a/docs/zh/sql-reference/data-types/uuid.md +++ b/docs/zh/sql-reference/data-types/uuid.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/data-types/uuid sidebar_position: 46 sidebar_label: UUID --- diff --git a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md index 4e8ede61326..6a2070065be 100644 --- a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md +++ b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 45 diff --git a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index 86bfb9e69d0..fdad11b2f36 100644 --- a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 41 diff --git a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index eb37c9d9983..d6a0d3b244f 100644 --- a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 42 diff --git 
a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 4d8a0014f47..49a4ce355b7 100644 --- a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 43 diff --git a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index fddd6667949..69f91c06dee 100644 --- a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 44 diff --git a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md index f170705fbec..bb1d4c31c37 100644 --- a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md +++ b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/dictionaries/external-dictionaries/external-dicts-dict machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 40 diff --git a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts.md b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts.md index 5649e915f90..8b62815967f 100644 --- a/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts.md +++ b/docs/zh/sql-reference/dictionaries/external-dictionaries/external-dicts.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/dictionaries/external-dictionaries/external-dicts machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 39 diff --git a/docs/zh/sql-reference/dictionaries/index.md b/docs/zh/sql-reference/dictionaries/index.md index f07d815ab9d..dca127b2ecf 100644 --- a/docs/zh/sql-reference/dictionaries/index.md +++ b/docs/zh/sql-reference/dictionaries/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/dictionaries/ sidebar_position: 35 sidebar_label: 字典 --- diff --git a/docs/zh/sql-reference/dictionaries/internal-dicts.md b/docs/zh/sql-reference/dictionaries/internal-dicts.md index 67d5f4a5130..328055044fa 100644 --- a/docs/zh/sql-reference/dictionaries/internal-dicts.md +++ b/docs/zh/sql-reference/dictionaries/internal-dicts.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/dictionaries/internal-dicts machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 39 diff --git a/docs/zh/sql-reference/distributed-ddl.md b/docs/zh/sql-reference/distributed-ddl.md index f72d1ee36e9..f1c0e1c2f11 100644 --- a/docs/zh/sql-reference/distributed-ddl.md +++ b/docs/zh/sql-reference/distributed-ddl.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/distributed-ddl sidebar_position: 32 
sidebar_label: Distributed DDL --- diff --git a/docs/zh/sql-reference/functions/arithmetic-functions.md b/docs/zh/sql-reference/functions/arithmetic-functions.md index acba761b619..5bf6fc68a48 100644 --- a/docs/zh/sql-reference/functions/arithmetic-functions.md +++ b/docs/zh/sql-reference/functions/arithmetic-functions.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/functions/arithmetic-functions sidebar_position: 35 sidebar_label: 算术函数 --- diff --git a/docs/zh/sql-reference/functions/array-functions.md b/docs/zh/sql-reference/functions/array-functions.md index 0d4f51e1ddc..eccf5d375ec 100644 --- a/docs/zh/sql-reference/functions/array-functions.md +++ b/docs/zh/sql-reference/functions/array-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/array-functions +--- # 数组函数 {#shu-zu-han-shu} ## empty {#empty函数} diff --git a/docs/zh/sql-reference/functions/array-join.md b/docs/zh/sql-reference/functions/array-join.md index 2bf3ca17764..3a1f1d2d354 100644 --- a/docs/zh/sql-reference/functions/array-join.md +++ b/docs/zh/sql-reference/functions/array-join.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/array-join +--- # arrayJoin函数 {#functions_arrayjoin} 这是一个非常有用的函数。 diff --git a/docs/zh/sql-reference/functions/bit-functions.md b/docs/zh/sql-reference/functions/bit-functions.md index b2e9230c799..67e97f7a741 100644 --- a/docs/zh/sql-reference/functions/bit-functions.md +++ b/docs/zh/sql-reference/functions/bit-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/bit-functions +--- # 位操作函数 {#wei-cao-zuo-han-shu} 位操作函数适用于UInt8,UInt16,UInt32,UInt64,Int8,Int16,Int32,Int64,Float32或Float64中的任何类型。 diff --git a/docs/zh/sql-reference/functions/bitmap-functions.md b/docs/zh/sql-reference/functions/bitmap-functions.md index 2a6314e7727..3e7c1857055 100644 --- a/docs/zh/sql-reference/functions/bitmap-functions.md +++ b/docs/zh/sql-reference/functions/bitmap-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/bitmap-functions +--- # 位图函数 {#wei-tu-han-shu} 位图函数用于对两个位图对象进行计算,对于任何一个位图函数,它都将返回一个位图对象,例如and,or,xor,not等等。 diff --git a/docs/zh/sql-reference/functions/comparison-functions.md b/docs/zh/sql-reference/functions/comparison-functions.md index 1db6a5256ab..7e9dc069689 100644 --- a/docs/zh/sql-reference/functions/comparison-functions.md +++ b/docs/zh/sql-reference/functions/comparison-functions.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/functions/comparison-functions sidebar_position: 36 sidebar_label: 比较函数 --- diff --git a/docs/zh/sql-reference/functions/conditional-functions.md b/docs/zh/sql-reference/functions/conditional-functions.md index 64b4c96478a..62ec1523d22 100644 --- a/docs/zh/sql-reference/functions/conditional-functions.md +++ b/docs/zh/sql-reference/functions/conditional-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/conditional-functions +--- # 条件函数 {#tiao-jian-han-shu} ## if {#if} diff --git a/docs/zh/sql-reference/functions/date-time-functions.md b/docs/zh/sql-reference/functions/date-time-functions.md index b9fdc4e21f2..e2ba2bb3144 100644 --- a/docs/zh/sql-reference/functions/date-time-functions.md +++ b/docs/zh/sql-reference/functions/date-time-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/date-time-functions +--- # 时间日期函数 {#shi-jian-ri-qi-han-shu} 支持时区。 diff --git a/docs/zh/sql-reference/functions/encoding-functions.md b/docs/zh/sql-reference/functions/encoding-functions.md index b9a3cbf0550..5e9260ff94a 100644 --- a/docs/zh/sql-reference/functions/encoding-functions.md +++ 
b/docs/zh/sql-reference/functions/encoding-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/encoding-functions +--- # 编码函数 {#bian-ma-han-shu} ## char {#char} diff --git a/docs/zh/sql-reference/functions/ext-dict-functions.md b/docs/zh/sql-reference/functions/ext-dict-functions.md index 87e19dc0119..28ddb07b3a7 100644 --- a/docs/zh/sql-reference/functions/ext-dict-functions.md +++ b/docs/zh/sql-reference/functions/ext-dict-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/ext-dict-functions +--- # 字典函数 {#zi-dian-han-shu} 有关连接和配置外部词典的信息,请参阅[外部词典](../../sql-reference/functions/ext-dict-functions.md)。 diff --git a/docs/zh/sql-reference/functions/functions-for-nulls.md b/docs/zh/sql-reference/functions/functions-for-nulls.md index b228cbe6f99..7f9716db085 100644 --- a/docs/zh/sql-reference/functions/functions-for-nulls.md +++ b/docs/zh/sql-reference/functions/functions-for-nulls.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/functions-for-nulls +--- # Nullable处理函数 {#nullablechu-li-han-shu} ## isNull {#isnull} diff --git a/docs/zh/sql-reference/functions/geo.md b/docs/zh/sql-reference/functions/geo.md index fa8fecb1284..bbd67325bc5 100644 --- a/docs/zh/sql-reference/functions/geo.md +++ b/docs/zh/sql-reference/functions/geo.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/geo +--- # GEO函数 {#geohan-shu} ## 大圆形距离 {#greatcircledistance} diff --git a/docs/zh/sql-reference/functions/hash-functions.md b/docs/zh/sql-reference/functions/hash-functions.md index 0800631a807..1b3b877201a 100644 --- a/docs/zh/sql-reference/functions/hash-functions.md +++ b/docs/zh/sql-reference/functions/hash-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/hash-functions +--- # Hash函数 {#hashhan-shu} Hash函数可以用于将元素不可逆的伪随机打乱。 diff --git a/docs/zh/sql-reference/functions/higher-order-functions.md b/docs/zh/sql-reference/functions/higher-order-functions.md index dc5c99ae298..5ac2e92a5a6 100644 --- a/docs/zh/sql-reference/functions/higher-order-functions.md +++ b/docs/zh/sql-reference/functions/higher-order-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/higher-order-functions +--- # 高阶函数 {#gao-jie-han-shu} ## `->` 运算符, lambda(params, expr) 函数 {#yun-suan-fu-lambdaparams-expr-han-shu} diff --git a/docs/zh/sql-reference/functions/in-functions.md b/docs/zh/sql-reference/functions/in-functions.md index a689c6fe5ed..c527eb3bea8 100644 --- a/docs/zh/sql-reference/functions/in-functions.md +++ b/docs/zh/sql-reference/functions/in-functions.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/functions/in-functions sidebar_position: 60 sidebar_label: IN 运算符 --- diff --git a/docs/zh/sql-reference/functions/index.md b/docs/zh/sql-reference/functions/index.md index a65e7d5bc44..138f42c7cfa 100644 --- a/docs/zh/sql-reference/functions/index.md +++ b/docs/zh/sql-reference/functions/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/functions/ sidebar_label: 函数 sidebar_position: 32 --- diff --git a/docs/zh/sql-reference/functions/introspection.md b/docs/zh/sql-reference/functions/introspection.md index 001363744f6..9b8385a1196 100644 --- a/docs/zh/sql-reference/functions/introspection.md +++ b/docs/zh/sql-reference/functions/introspection.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/functions/introspection machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 65 diff --git a/docs/zh/sql-reference/functions/ip-address-functions.md b/docs/zh/sql-reference/functions/ip-address-functions.md 
index 6428070e704..a44c4d6e428 100644 --- a/docs/zh/sql-reference/functions/ip-address-functions.md +++ b/docs/zh/sql-reference/functions/ip-address-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/ip-address-functions +--- # IP函数 {#iphan-shu} ## IPv4NumToString(num) {#ipv4numtostringnum} diff --git a/docs/zh/sql-reference/functions/json-functions.md b/docs/zh/sql-reference/functions/json-functions.md index 8459f2ceccc..f26f0ef2db9 100644 --- a/docs/zh/sql-reference/functions/json-functions.md +++ b/docs/zh/sql-reference/functions/json-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/json-functions +--- # JSON函数 {#jsonhan-shu} 在Yandex.Metrica中,用户使用JSON作为访问参数。为了处理这些JSON,实现了一些函数。(尽管在大多数情况下,JSON是预先进行额外处理的,并将结果值放在单独的列中。)所有的这些函数都进行了尽可能的假设。以使函数能够尽快的完成工作。 diff --git a/docs/zh/sql-reference/functions/logical-functions.md b/docs/zh/sql-reference/functions/logical-functions.md index ecd52c2f2ad..2a94c2de7bd 100644 --- a/docs/zh/sql-reference/functions/logical-functions.md +++ b/docs/zh/sql-reference/functions/logical-functions.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/functions/logical-functions sidebar_position: 37 sidebar_label: 逻辑函数 --- diff --git a/docs/zh/sql-reference/functions/machine-learning-functions.md b/docs/zh/sql-reference/functions/machine-learning-functions.md index 77f8d76a722..d7060e7dac2 100644 --- a/docs/zh/sql-reference/functions/machine-learning-functions.md +++ b/docs/zh/sql-reference/functions/machine-learning-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/machine-learning-functions +--- # 机器学习函数 {#ji-qi-xue-xi-han-shu} ## evalMLMethod(预测) {#machine_learning_methods-evalmlmethod} diff --git a/docs/zh/sql-reference/functions/math-functions.md b/docs/zh/sql-reference/functions/math-functions.md index 5458ac37dfd..4deac50fd57 100644 --- a/docs/zh/sql-reference/functions/math-functions.md +++ b/docs/zh/sql-reference/functions/math-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/math-functions +--- # 数学函数 {#shu-xue-han-shu} 以下所有的函数都返回一个Float64类型的数值。返回结果总是以尽可能最大精度返回,但还是可能与机器中可表示最接近该值的数字不同。 diff --git a/docs/zh/sql-reference/functions/other-functions.md b/docs/zh/sql-reference/functions/other-functions.md index 2588f623227..fde55ec884f 100644 --- a/docs/zh/sql-reference/functions/other-functions.md +++ b/docs/zh/sql-reference/functions/other-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/other-functions +--- # 其他函数 {#qi-ta-han-shu} ## 主机名() {#hostname} diff --git a/docs/zh/sql-reference/functions/random-functions.md b/docs/zh/sql-reference/functions/random-functions.md index 34ff2d9943a..1d0d32fd215 100644 --- a/docs/zh/sql-reference/functions/random-functions.md +++ b/docs/zh/sql-reference/functions/random-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/random-functions +--- # 随机函数 {#sui-ji-han-shu} 随机函数使用非加密方式生成伪随机数字。 diff --git a/docs/zh/sql-reference/functions/rounding-functions.md b/docs/zh/sql-reference/functions/rounding-functions.md index 46f137da995..7186766ec78 100644 --- a/docs/zh/sql-reference/functions/rounding-functions.md +++ b/docs/zh/sql-reference/functions/rounding-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/rounding-functions +--- # 取整函数 {#qu-zheng-han-shu} ## 楼(x\[,N\]) {#floorx-n} diff --git a/docs/zh/sql-reference/functions/splitting-merging-functions.md b/docs/zh/sql-reference/functions/splitting-merging-functions.md index ade5bae21da..ddf30cd139d 100644 --- 
a/docs/zh/sql-reference/functions/splitting-merging-functions.md +++ b/docs/zh/sql-reference/functions/splitting-merging-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/splitting-merging-functions +--- # 字符串拆分合并函数 {#zi-fu-chuan-chai-fen-he-bing-han-shu} ## splitByChar(分隔符,s) {#splitbycharseparator-s} diff --git a/docs/zh/sql-reference/functions/string-functions.md b/docs/zh/sql-reference/functions/string-functions.md index d4f8d36273d..3bca7e29a6b 100644 --- a/docs/zh/sql-reference/functions/string-functions.md +++ b/docs/zh/sql-reference/functions/string-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/string-functions +--- # 字符串函数 {#zi-fu-chuan-han-shu} ## empty {#string-functions-empty} diff --git a/docs/zh/sql-reference/functions/string-replace-functions.md b/docs/zh/sql-reference/functions/string-replace-functions.md index 01bb809c78f..3f1d43da7fc 100644 --- a/docs/zh/sql-reference/functions/string-replace-functions.md +++ b/docs/zh/sql-reference/functions/string-replace-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/string-replace-functions +--- # 字符串替换函数 {#zi-fu-chuan-ti-huan-han-shu} ## replaceOne(haystack, pattern, replacement) {#replaceonehaystack-pattern-replacement} diff --git a/docs/zh/sql-reference/functions/string-search-functions.md b/docs/zh/sql-reference/functions/string-search-functions.md index 4475dae270b..69e82e378cc 100644 --- a/docs/zh/sql-reference/functions/string-search-functions.md +++ b/docs/zh/sql-reference/functions/string-search-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/string-search-functions +--- # 字符串搜索函数 {#zi-fu-chuan-sou-suo-han-shu} 下列所有函数在默认的情况下区分大小写。对于不区分大小写的搜索,存在单独的变体。 diff --git a/docs/zh/sql-reference/functions/time-window-functions.md b/docs/zh/sql-reference/functions/time-window-functions.md index 75a24ceb5fd..f27e8281c62 100644 --- a/docs/zh/sql-reference/functions/time-window-functions.md +++ b/docs/zh/sql-reference/functions/time-window-functions.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/functions/time-window-functions sidebar_position: 68 sidebar_label: 时间窗口 --- diff --git a/docs/zh/sql-reference/functions/type-conversion-functions.md b/docs/zh/sql-reference/functions/type-conversion-functions.md index d2330df6cb1..2db1f45ac0b 100644 --- a/docs/zh/sql-reference/functions/type-conversion-functions.md +++ b/docs/zh/sql-reference/functions/type-conversion-functions.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/functions/type-conversion-functions sidebar_position: 38 sidebar_label: 类型转换函数 --- diff --git a/docs/zh/sql-reference/functions/url-functions.md b/docs/zh/sql-reference/functions/url-functions.md index 040bd1e6aae..e4e4cca4a0f 100644 --- a/docs/zh/sql-reference/functions/url-functions.md +++ b/docs/zh/sql-reference/functions/url-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/url-functions +--- # URL函数 {#urlhan-shu} 所有这些功能都不遵循RFC。它们被最大程度简化以提高性能。 diff --git a/docs/zh/sql-reference/functions/uuid-functions.md b/docs/zh/sql-reference/functions/uuid-functions.md index 99f5c3407ef..021628ce388 100644 --- a/docs/zh/sql-reference/functions/uuid-functions.md +++ b/docs/zh/sql-reference/functions/uuid-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/uuid-functions +--- # UUID函数 {#uuidhan-shu} 下面列出了所有UUID的相关函数 diff --git a/docs/zh/sql-reference/functions/ym-dict-functions.md b/docs/zh/sql-reference/functions/ym-dict-functions.md index f8ac29e6d4c..bf27ecf958e 100644 --- 
a/docs/zh/sql-reference/functions/ym-dict-functions.md +++ b/docs/zh/sql-reference/functions/ym-dict-functions.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/functions/ym-dict-functions +--- # 使用 Yandex.Metrica 字典函数 {#functions-for-working-with-yandex-metrica-dictionaries} 为了使下面的功能正常工作,服务器配置必须指定获取所有 Yandex.Metrica 字典的路径和地址。Yandex.Metrica 字典在任何这些函数的第一次调用时加载。 如果无法加载引用列表,则会引发异常。 diff --git a/docs/zh/sql-reference/index.md b/docs/zh/sql-reference/index.md index e324dd932aa..2ac669fa295 100644 --- a/docs/zh/sql-reference/index.md +++ b/docs/zh/sql-reference/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/ sidebar_label: SQL参考 toc_hidden: true sidebar_position: 28 diff --git a/docs/zh/sql-reference/operators/in.md b/docs/zh/sql-reference/operators/in.md index 9f97fb71234..df4c8772e86 100644 --- a/docs/zh/sql-reference/operators/in.md +++ b/docs/zh/sql-reference/operators/in.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/operators/in machine_translated: true machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 --- diff --git a/docs/zh/sql-reference/operators/index.md b/docs/zh/sql-reference/operators/index.md index 19894ac33b0..aee489e03f5 100644 --- a/docs/zh/sql-reference/operators/index.md +++ b/docs/zh/sql-reference/operators/index.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/operators/ +--- # 操作符 {#cao-zuo-fu} 所有的操作符(运算符)都会在查询时依据他们的优先级及其结合顺序在被解析时转换为对应的函数。下面按优先级从高到低列出各组运算符及其对应的函数: diff --git a/docs/zh/sql-reference/statements/alter.md b/docs/zh/sql-reference/statements/alter.md index da3d4c7023a..2e143d3b654 100644 --- a/docs/zh/sql-reference/statements/alter.md +++ b/docs/zh/sql-reference/statements/alter.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter sidebar_position: 36 sidebar_label: ALTER --- diff --git a/docs/zh/sql-reference/statements/alter/constraint.md b/docs/zh/sql-reference/statements/alter/constraint.md index 38f2437f9c7..c1204e4b9aa 100644 --- a/docs/zh/sql-reference/statements/alter/constraint.md +++ b/docs/zh/sql-reference/statements/alter/constraint.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/constraint sidebar_position: 43 sidebar_label: 约束 --- diff --git a/docs/zh/sql-reference/statements/alter/delete.md b/docs/zh/sql-reference/statements/alter/delete.md index ac7d8fefe08..85d3d3077a7 100644 --- a/docs/zh/sql-reference/statements/alter/delete.md +++ b/docs/zh/sql-reference/statements/alter/delete.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/delete sidebar_position: 39 sidebar_label: DELETE --- diff --git a/docs/zh/sql-reference/statements/alter/index.md b/docs/zh/sql-reference/statements/alter/index.md index 74ca2e6d79e..d9878f76c38 100644 --- a/docs/zh/sql-reference/statements/alter/index.md +++ b/docs/zh/sql-reference/statements/alter/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/ sidebar_position: 35 sidebar_label: ALTER --- diff --git a/docs/zh/sql-reference/statements/alter/index/index.md b/docs/zh/sql-reference/statements/alter/index/index.md index 49bd973531c..8c22c9e0c84 100644 --- a/docs/zh/sql-reference/statements/alter/index/index.md +++ b/docs/zh/sql-reference/statements/alter/index/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/ toc_hidden_folder: true sidebar_position: 42 sidebar_label: INDEX diff --git a/docs/zh/sql-reference/statements/alter/order-by.md b/docs/zh/sql-reference/statements/alter/order-by.md index 7f0f68cf0b4..e70a8b59c85 100644 --- a/docs/zh/sql-reference/statements/alter/order-by.md +++ 
b/docs/zh/sql-reference/statements/alter/order-by.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/order-by sidebar_position: 41 sidebar_label: ORDER BY --- diff --git a/docs/zh/sql-reference/statements/alter/role.md b/docs/zh/sql-reference/statements/alter/role.md index ef35cb8c051..e364571359f 100644 --- a/docs/zh/sql-reference/statements/alter/role.md +++ b/docs/zh/sql-reference/statements/alter/role.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/role sidebar_position: 46 sidebar_label: 角色 --- diff --git a/docs/zh/sql-reference/statements/alter/row-policy.md b/docs/zh/sql-reference/statements/alter/row-policy.md index 931e49ed6bd..227c881eda4 100644 --- a/docs/zh/sql-reference/statements/alter/row-policy.md +++ b/docs/zh/sql-reference/statements/alter/row-policy.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/row-policy sidebar_position: 47 sidebar_label: 行策略 --- diff --git a/docs/zh/sql-reference/statements/alter/setting.md b/docs/zh/sql-reference/statements/alter/setting.md index 54baf3e9fc7..16bf7c1f337 100644 --- a/docs/zh/sql-reference/statements/alter/setting.md +++ b/docs/zh/sql-reference/statements/alter/setting.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/setting sidebar_position: 38 sidebar_label: SETTING --- diff --git a/docs/zh/sql-reference/statements/alter/settings-profile.md b/docs/zh/sql-reference/statements/alter/settings-profile.md index 8dd0d8ca08f..e4365b25c1a 100644 --- a/docs/zh/sql-reference/statements/alter/settings-profile.md +++ b/docs/zh/sql-reference/statements/alter/settings-profile.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/settings-profile sidebar_position: 48 sidebar_label: 配置文件设置 --- diff --git a/docs/zh/sql-reference/statements/alter/ttl.md b/docs/zh/sql-reference/statements/alter/ttl.md index 624d8dd36dc..58af4183cc5 100644 --- a/docs/zh/sql-reference/statements/alter/ttl.md +++ b/docs/zh/sql-reference/statements/alter/ttl.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/ttl sidebar_position: 44 sidebar_label: TTL --- diff --git a/docs/zh/sql-reference/statements/alter/update.md b/docs/zh/sql-reference/statements/alter/update.md index 4b71d670fd1..c6b44336deb 100644 --- a/docs/zh/sql-reference/statements/alter/update.md +++ b/docs/zh/sql-reference/statements/alter/update.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/update sidebar_position: 40 sidebar_label: UPDATE --- diff --git a/docs/zh/sql-reference/statements/alter/view.md b/docs/zh/sql-reference/statements/alter/view.md index 2489f09c4a4..0eee462296c 100644 --- a/docs/zh/sql-reference/statements/alter/view.md +++ b/docs/zh/sql-reference/statements/alter/view.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/alter/view sidebar_position: 50 sidebar_label: VIEW --- diff --git a/docs/zh/sql-reference/statements/create.md b/docs/zh/sql-reference/statements/create.md index 21a3e28ebc4..aef21a704b5 100644 --- a/docs/zh/sql-reference/statements/create.md +++ b/docs/zh/sql-reference/statements/create.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/statements/create +--- ## CREATE DATABASE {#create-database} 该查询用于根据指定名称创建数据库。 diff --git a/docs/zh/sql-reference/statements/create/database.md b/docs/zh/sql-reference/statements/create/database.md index 1a49960ac73..2c6e53c0f06 100644 --- a/docs/zh/sql-reference/statements/create/database.md +++ b/docs/zh/sql-reference/statements/create/database.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/create/database 
sidebar_position: 35 sidebar_label: DATABASE --- diff --git a/docs/zh/sql-reference/statements/create/function.md b/docs/zh/sql-reference/statements/create/function.md index a5511d8e5c3..8a183324b28 100644 --- a/docs/zh/sql-reference/statements/create/function.md +++ b/docs/zh/sql-reference/statements/create/function.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/create/function sidebar_position: 38 sidebar_label: FUNCTION --- diff --git a/docs/zh/sql-reference/statements/create/index.md b/docs/zh/sql-reference/statements/create/index.md index 017a4c5cabd..084eaf3f1ad 100644 --- a/docs/zh/sql-reference/statements/create/index.md +++ b/docs/zh/sql-reference/statements/create/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/create/ sidebar_label: CREATE sidebar_position: 34 --- diff --git a/docs/zh/sql-reference/statements/create/view.md b/docs/zh/sql-reference/statements/create/view.md index b4748133f19..5daa4092732 100644 --- a/docs/zh/sql-reference/statements/create/view.md +++ b/docs/zh/sql-reference/statements/create/view.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/create/view sidebar_position: 37 sidebar_label: VIEW --- diff --git a/docs/zh/sql-reference/statements/drop.md b/docs/zh/sql-reference/statements/drop.md index 7f00875fc41..00832eda4db 100644 --- a/docs/zh/sql-reference/statements/drop.md +++ b/docs/zh/sql-reference/statements/drop.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/drop sidebar_position: 44 sidebar_label: DROP --- diff --git a/docs/zh/sql-reference/statements/exchange.md b/docs/zh/sql-reference/statements/exchange.md index e4cf74ac821..e6ac1dbf1dc 100644 --- a/docs/zh/sql-reference/statements/exchange.md +++ b/docs/zh/sql-reference/statements/exchange.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/exchange sidebar_position: 49 sidebar_label: EXCHANGE --- diff --git a/docs/zh/sql-reference/statements/exists.md b/docs/zh/sql-reference/statements/exists.md index 59da3986fc5..e31a4a436b9 100644 --- a/docs/zh/sql-reference/statements/exists.md +++ b/docs/zh/sql-reference/statements/exists.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/exists sidebar_position: 45 sidebar_label: EXISTS --- diff --git a/docs/zh/sql-reference/statements/grant.md b/docs/zh/sql-reference/statements/grant.md index af644717736..7bea0d95fba 100644 --- a/docs/zh/sql-reference/statements/grant.md +++ b/docs/zh/sql-reference/statements/grant.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/grant sidebar_position: 39 sidebar_label: 授权操作 --- diff --git a/docs/zh/sql-reference/statements/index.md b/docs/zh/sql-reference/statements/index.md index 2c0c1577d93..e5f0081ab69 100644 --- a/docs/zh/sql-reference/statements/index.md +++ b/docs/zh/sql-reference/statements/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/ sidebar_label: SQL 语句 sidebar_position: 31 --- diff --git a/docs/zh/sql-reference/statements/insert-into.md b/docs/zh/sql-reference/statements/insert-into.md index 4f958e31b18..d552119f41c 100644 --- a/docs/zh/sql-reference/statements/insert-into.md +++ b/docs/zh/sql-reference/statements/insert-into.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/statements/insert-into +--- ## INSERT INTO 语句 {#insert} INSERT INTO 语句主要用于向系统中添加数据. 
diff --git a/docs/zh/sql-reference/statements/misc.md b/docs/zh/sql-reference/statements/misc.md index fce621fa8f7..98387f9cad3 100644 --- a/docs/zh/sql-reference/statements/misc.md +++ b/docs/zh/sql-reference/statements/misc.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/misc machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd sidebar_position: 41 diff --git a/docs/zh/sql-reference/statements/rename.md b/docs/zh/sql-reference/statements/rename.md index 00c692abefd..c26dce306cc 100644 --- a/docs/zh/sql-reference/statements/rename.md +++ b/docs/zh/sql-reference/statements/rename.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/rename sidebar_position: 48 sidebar_label: RENAME --- diff --git a/docs/zh/sql-reference/statements/revoke.md b/docs/zh/sql-reference/statements/revoke.md index c769912cddd..409eedc40e5 100644 --- a/docs/zh/sql-reference/statements/revoke.md +++ b/docs/zh/sql-reference/statements/revoke.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/revoke sidebar_position: 40 sidebar_label: REVOKE --- diff --git a/docs/zh/sql-reference/statements/select/all.md b/docs/zh/sql-reference/statements/select/all.md index b23a3973502..121ac89fffc 100644 --- a/docs/zh/sql-reference/statements/select/all.md +++ b/docs/zh/sql-reference/statements/select/all.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/statements/select/all +--- # ALL 子句 {#select-all} `SELECT ALL` 和 `SELECT` 不带 `DISTINCT` 是一样的。 diff --git a/docs/zh/sql-reference/statements/select/array-join.md b/docs/zh/sql-reference/statements/select/array-join.md index b4088d69180..b0352a7bb0a 100644 --- a/docs/zh/sql-reference/statements/select/array-join.md +++ b/docs/zh/sql-reference/statements/select/array-join.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/array-join sidebar_label: ARRAY JOIN --- diff --git a/docs/zh/sql-reference/statements/select/distinct.md b/docs/zh/sql-reference/statements/select/distinct.md index 7a788e9351a..98fd18f043d 100644 --- a/docs/zh/sql-reference/statements/select/distinct.md +++ b/docs/zh/sql-reference/statements/select/distinct.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/distinct sidebar_label: DISTINCT --- diff --git a/docs/zh/sql-reference/statements/select/format.md b/docs/zh/sql-reference/statements/select/format.md index a17b8284a31..bb7a641b458 100644 --- a/docs/zh/sql-reference/statements/select/format.md +++ b/docs/zh/sql-reference/statements/select/format.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/format sidebar_label: FORMAT --- diff --git a/docs/zh/sql-reference/statements/select/from.md b/docs/zh/sql-reference/statements/select/from.md index cc5dfd8ae01..3a8b15d7ab1 100644 --- a/docs/zh/sql-reference/statements/select/from.md +++ b/docs/zh/sql-reference/statements/select/from.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/from sidebar_label: FROM --- diff --git a/docs/zh/sql-reference/statements/select/group-by.md b/docs/zh/sql-reference/statements/select/group-by.md index 33fa27b8dd9..90b3c7660ee 100644 --- a/docs/zh/sql-reference/statements/select/group-by.md +++ b/docs/zh/sql-reference/statements/select/group-by.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/group-by sidebar_label: GROUP BY --- diff --git a/docs/zh/sql-reference/statements/select/having.md b/docs/zh/sql-reference/statements/select/having.md index 9a37a2ca78c..0b2db29bd19 100644 --- a/docs/zh/sql-reference/statements/select/having.md +++ 
b/docs/zh/sql-reference/statements/select/having.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/having sidebar_label: HAVING --- diff --git a/docs/zh/sql-reference/statements/select/index.md b/docs/zh/sql-reference/statements/select/index.md index a1738b64bab..2d4044cbd20 100644 --- a/docs/zh/sql-reference/statements/select/index.md +++ b/docs/zh/sql-reference/statements/select/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/ sidebar_label: SELECT sidebar_position: 33 --- diff --git a/docs/zh/sql-reference/statements/select/into-outfile.md b/docs/zh/sql-reference/statements/select/into-outfile.md index 673038015ba..ca814c5f0b7 100644 --- a/docs/zh/sql-reference/statements/select/into-outfile.md +++ b/docs/zh/sql-reference/statements/select/into-outfile.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/into-outfile sidebar_label: INTO OUTFILE --- diff --git a/docs/zh/sql-reference/statements/select/join.md b/docs/zh/sql-reference/statements/select/join.md index 7760284368f..08290a02de5 100644 --- a/docs/zh/sql-reference/statements/select/join.md +++ b/docs/zh/sql-reference/statements/select/join.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/join sidebar_label: JOIN --- diff --git a/docs/zh/sql-reference/statements/select/limit-by.md b/docs/zh/sql-reference/statements/select/limit-by.md index 35d9b3cc721..22052a4f814 100644 --- a/docs/zh/sql-reference/statements/select/limit-by.md +++ b/docs/zh/sql-reference/statements/select/limit-by.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/limit-by sidebar_label: LIMIT BY --- diff --git a/docs/zh/sql-reference/statements/select/limit.md b/docs/zh/sql-reference/statements/select/limit.md index 4b877663ffc..2bbf2949707 100644 --- a/docs/zh/sql-reference/statements/select/limit.md +++ b/docs/zh/sql-reference/statements/select/limit.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/limit sidebar_label: LIMIT --- diff --git a/docs/zh/sql-reference/statements/select/order-by.md b/docs/zh/sql-reference/statements/select/order-by.md index f94be2f9cbd..01f702a4b1e 100644 --- a/docs/zh/sql-reference/statements/select/order-by.md +++ b/docs/zh/sql-reference/statements/select/order-by.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/order-by sidebar_label: ORDER BY --- diff --git a/docs/zh/sql-reference/statements/select/prewhere.md b/docs/zh/sql-reference/statements/select/prewhere.md index a2eb3acd83f..1f74fc0e676 100644 --- a/docs/zh/sql-reference/statements/select/prewhere.md +++ b/docs/zh/sql-reference/statements/select/prewhere.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/prewhere sidebar_label: PREWHERE --- diff --git a/docs/zh/sql-reference/statements/select/sample.md b/docs/zh/sql-reference/statements/select/sample.md index 574d076c7f2..f701bd3b805 100644 --- a/docs/zh/sql-reference/statements/select/sample.md +++ b/docs/zh/sql-reference/statements/select/sample.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/sample sidebar_label: SAMPLE --- diff --git a/docs/zh/sql-reference/statements/select/union.md b/docs/zh/sql-reference/statements/select/union.md index 932f53c3cbf..3b9fc93795e 100644 --- a/docs/zh/sql-reference/statements/select/union.md +++ b/docs/zh/sql-reference/statements/select/union.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/union sidebar_label: UNION ALL --- diff --git a/docs/zh/sql-reference/statements/select/where.md 
b/docs/zh/sql-reference/statements/select/where.md index f2cdce243b4..fe165e8ad16 100644 --- a/docs/zh/sql-reference/statements/select/where.md +++ b/docs/zh/sql-reference/statements/select/where.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/where sidebar_label: WHERE --- diff --git a/docs/zh/sql-reference/statements/select/with.md b/docs/zh/sql-reference/statements/select/with.md index dae39233b61..110989212f9 100644 --- a/docs/zh/sql-reference/statements/select/with.md +++ b/docs/zh/sql-reference/statements/select/with.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/select/with sidebar_label: WITH --- diff --git a/docs/zh/sql-reference/statements/set.md b/docs/zh/sql-reference/statements/set.md index a614fdd9e44..c867da05419 100644 --- a/docs/zh/sql-reference/statements/set.md +++ b/docs/zh/sql-reference/statements/set.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/set sidebar_position: 50 sidebar_label: SET --- diff --git a/docs/zh/sql-reference/statements/show.md b/docs/zh/sql-reference/statements/show.md index d7a42c504ed..86484785126 100644 --- a/docs/zh/sql-reference/statements/show.md +++ b/docs/zh/sql-reference/statements/show.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/show sidebar_position: 38 sidebar_label: SHOW --- diff --git a/docs/zh/sql-reference/statements/system.md b/docs/zh/sql-reference/statements/system.md index 1d5d7f169bf..f310b70c24c 100644 --- a/docs/zh/sql-reference/statements/system.md +++ b/docs/zh/sql-reference/statements/system.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/system sidebar_position: 37 sidebar_label: SYSTEM --- diff --git a/docs/zh/sql-reference/statements/truncate.md b/docs/zh/sql-reference/statements/truncate.md index 9ec6eff5b61..1dbda0bfca8 100644 --- a/docs/zh/sql-reference/statements/truncate.md +++ b/docs/zh/sql-reference/statements/truncate.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/truncate sidebar_position: 52 sidebar_label: TRUNCATE --- diff --git a/docs/zh/sql-reference/statements/use.md b/docs/zh/sql-reference/statements/use.md index c8708109575..c37c6eaaca7 100644 --- a/docs/zh/sql-reference/statements/use.md +++ b/docs/zh/sql-reference/statements/use.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/use sidebar_position: 53 sidebar_label: USE --- diff --git a/docs/zh/sql-reference/statements/watch.md b/docs/zh/sql-reference/statements/watch.md index ad5da29fd20..f695cf6da72 100644 --- a/docs/zh/sql-reference/statements/watch.md +++ b/docs/zh/sql-reference/statements/watch.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/statements/watch sidebar_position: 53 sidebar_label: WATCH --- diff --git a/docs/zh/sql-reference/syntax.md b/docs/zh/sql-reference/syntax.md index 478bea11e00..ab8f2699473 100644 --- a/docs/zh/sql-reference/syntax.md +++ b/docs/zh/sql-reference/syntax.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/syntax sidebar_position: 31 sidebar_label: SQL语法 diff --git a/docs/zh/sql-reference/table-functions/file.md b/docs/zh/sql-reference/table-functions/file.md index b83d4509df9..b097c888518 100644 --- a/docs/zh/sql-reference/table-functions/file.md +++ b/docs/zh/sql-reference/table-functions/file.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/file sidebar_position: 37 sidebar_label: file --- diff --git a/docs/zh/sql-reference/table-functions/generate.md b/docs/zh/sql-reference/table-functions/generate.md index 7df197379ba..c5fad933669 100644 --- a/docs/zh/sql-reference/table-functions/generate.md +++ 
b/docs/zh/sql-reference/table-functions/generate.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/generate sidebar_position: 47 sidebar_label: generateRandom --- diff --git a/docs/zh/sql-reference/table-functions/hdfs.md b/docs/zh/sql-reference/table-functions/hdfs.md index c0a58a6410b..8ec869b1ed0 100644 --- a/docs/zh/sql-reference/table-functions/hdfs.md +++ b/docs/zh/sql-reference/table-functions/hdfs.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/hdfs sidebar_position: 45 sidebar_label: hdfs --- diff --git a/docs/zh/sql-reference/table-functions/index.md b/docs/zh/sql-reference/table-functions/index.md index 6ad65f7099a..b617aafe4ba 100644 --- a/docs/zh/sql-reference/table-functions/index.md +++ b/docs/zh/sql-reference/table-functions/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/ sidebar_label: 表函数 sidebar_position: 34 --- diff --git a/docs/zh/sql-reference/table-functions/input.md b/docs/zh/sql-reference/table-functions/input.md index cf8f8eeed7e..a1490e7bf9f 100644 --- a/docs/zh/sql-reference/table-functions/input.md +++ b/docs/zh/sql-reference/table-functions/input.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/input sidebar_position: 46 sidebar_label: input --- diff --git a/docs/zh/sql-reference/table-functions/jdbc.md b/docs/zh/sql-reference/table-functions/jdbc.md index 302f01e4c75..a468bd7b41e 100644 --- a/docs/zh/sql-reference/table-functions/jdbc.md +++ b/docs/zh/sql-reference/table-functions/jdbc.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/jdbc sidebar_position: 43 sidebar_label: jdbc --- diff --git a/docs/zh/sql-reference/table-functions/merge.md b/docs/zh/sql-reference/table-functions/merge.md index 316c5bc0f18..ada4e50d9f9 100644 --- a/docs/zh/sql-reference/table-functions/merge.md +++ b/docs/zh/sql-reference/table-functions/merge.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/merge sidebar_position: 38 sidebar_label: merge --- diff --git a/docs/zh/sql-reference/table-functions/mysql.md b/docs/zh/sql-reference/table-functions/mysql.md index f406a6d051e..c08369be61a 100644 --- a/docs/zh/sql-reference/table-functions/mysql.md +++ b/docs/zh/sql-reference/table-functions/mysql.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/table-functions/mysql +--- # mysql {#mysql} 允许对存储在远程MySQL服务器上的数据执行`SELECT`和`INSERT`查询。 diff --git a/docs/zh/sql-reference/table-functions/numbers.md b/docs/zh/sql-reference/table-functions/numbers.md index 26dd6f08163..10d2eeb05e9 100644 --- a/docs/zh/sql-reference/table-functions/numbers.md +++ b/docs/zh/sql-reference/table-functions/numbers.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/numbers sidebar_position: 39 sidebar_label: numbers --- diff --git a/docs/zh/sql-reference/table-functions/odbc.md b/docs/zh/sql-reference/table-functions/odbc.md index aa06d6c03a6..69b74de9c7f 100644 --- a/docs/zh/sql-reference/table-functions/odbc.md +++ b/docs/zh/sql-reference/table-functions/odbc.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/odbc sidebar_position: 44 sidebar_label: odbc --- diff --git a/docs/zh/sql-reference/table-functions/postgresql.md b/docs/zh/sql-reference/table-functions/postgresql.md index 6c9db9798c0..3e1864c741a 100644 --- a/docs/zh/sql-reference/table-functions/postgresql.md +++ b/docs/zh/sql-reference/table-functions/postgresql.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/postgresql sidebar_position: 42 sidebar_label: postgresql --- diff --git 
a/docs/zh/sql-reference/table-functions/remote.md b/docs/zh/sql-reference/table-functions/remote.md index 236ce1a7499..7aeb6c0664a 100644 --- a/docs/zh/sql-reference/table-functions/remote.md +++ b/docs/zh/sql-reference/table-functions/remote.md @@ -1,3 +1,6 @@ +--- +slug: /zh/sql-reference/table-functions/remote +--- # remote, remoteSecure {#remote-remotesecure} 允许您访问远程服务器,而无需创建 `Distributed` 表。`remoteSecure` - 与 `remote` 相同,但是会使用加密链接。 diff --git a/docs/zh/sql-reference/table-functions/s3.md b/docs/zh/sql-reference/table-functions/s3.md index 651dcfcb599..ebfa2fb2e22 100644 --- a/docs/zh/sql-reference/table-functions/s3.md +++ b/docs/zh/sql-reference/table-functions/s3.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/s3 sidebar_position: 45 sidebar_label: s3 --- diff --git a/docs/zh/sql-reference/table-functions/url.md b/docs/zh/sql-reference/table-functions/url.md index cebeea609a3..cd6a53664f9 100644 --- a/docs/zh/sql-reference/table-functions/url.md +++ b/docs/zh/sql-reference/table-functions/url.md @@ -1,4 +1,5 @@ --- +slug: /zh/sql-reference/table-functions/url sidebar_position: 41 sidebar_label: url --- diff --git a/docs/zh/whats-new/changelog/2017.md b/docs/zh/whats-new/changelog/2017.md index aec6aa86d35..762a7f107e3 100644 --- a/docs/zh/whats-new/changelog/2017.md +++ b/docs/zh/whats-new/changelog/2017.md @@ -1,3 +1,6 @@ +--- +slug: /zh/whats-new/changelog/2017 +--- ### ClickHouse 版本 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21} 此版本包含先前版本 1.1.54318 的错误修复: diff --git a/docs/zh/whats-new/changelog/2018.md b/docs/zh/whats-new/changelog/2018.md index c87df7966f4..8b0c305af3d 100644 --- a/docs/zh/whats-new/changelog/2018.md +++ b/docs/zh/whats-new/changelog/2018.md @@ -1,3 +1,6 @@ +--- +slug: /zh/whats-new/changelog/2018 +--- ## ClickHouse 版本 18.16 {#clickhouse-release-18-16} ### ClickHouse 版本 18.16.1, 2018-12-21 {#clickhouse-release-18-16-1-2018-12-21} diff --git a/docs/zh/whats-new/changelog/2019.md b/docs/zh/whats-new/changelog/2019.md index 5eeaf9226c4..72c908c9e0e 100644 --- a/docs/zh/whats-new/changelog/2019.md +++ b/docs/zh/whats-new/changelog/2019.md @@ -1,3 +1,6 @@ +--- +slug: /zh/whats-new/changelog/2019 +--- ## ClickHouse 版本 19.17 {#clickhouse-release-v19-17} ### ClickHouse 版本 19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27} diff --git a/docs/zh/whats-new/changelog/2020.md b/docs/zh/whats-new/changelog/2020.md index 3a0427f05d3..30531d74030 100644 --- a/docs/zh/whats-new/changelog/2020.md +++ b/docs/zh/whats-new/changelog/2020.md @@ -1,3 +1,6 @@ +--- +slug: /zh/whats-new/changelog/2020 +--- ### ClickHouse 版本 20.12 ### ClickHouse 版本 v20.12.5.14-stable, 2020-12-28 diff --git a/docs/zh/whats-new/changelog/index.md b/docs/zh/whats-new/changelog/index.md index 95bfcb71fbc..8fd19a0f1d8 100644 --- a/docs/zh/whats-new/changelog/index.md +++ b/docs/zh/whats-new/changelog/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/whats-new/changelog/ sidebar_position: 90 sidebar_label: Changelog --- diff --git a/docs/zh/whats-new/index.md b/docs/zh/whats-new/index.md index 1e3d33d6407..9ef04b1b8c0 100644 --- a/docs/zh/whats-new/index.md +++ b/docs/zh/whats-new/index.md @@ -1,4 +1,5 @@ --- +slug: /zh/whats-new/ sidebar_label: ClickHouse事迹 sidebar_position: 82 --- diff --git a/docs/zh/whats-new/roadmap.md b/docs/zh/whats-new/roadmap.md index 55a696f9dab..cd2940d78eb 100644 --- a/docs/zh/whats-new/roadmap.md +++ b/docs/zh/whats-new/roadmap.md @@ -1,4 +1,5 @@ --- +slug: /zh/whats-new/roadmap sidebar_position: 74 sidebar_label: Roadmap --- diff --git 
a/docs/zh/whats-new/security-changelog.md b/docs/zh/whats-new/security-changelog.md index e65112a823a..a4e82241cb1 100644 --- a/docs/zh/whats-new/security-changelog.md +++ b/docs/zh/whats-new/security-changelog.md @@ -1,4 +1,5 @@ --- +slug: /zh/whats-new/security-changelog sidebar_position: 76 sidebar_label: 安全更新日志 --- diff --git a/packages/build b/packages/build index 53a7538f80e..c5ebf8641a3 100755 --- a/packages/build +++ b/packages/build @@ -95,7 +95,7 @@ done EOF chmod +x "$PKG_PATH/install/doinst.sh" if [ -f "$PKG_PATH/DEBIAN/postinst" ]; then - tail +2 "$PKG_PATH/DEBIAN/postinst" > "$PKG_PATH/install/doinst.sh" + tail +2 "$PKG_PATH/DEBIAN/postinst" >> "$PKG_PATH/install/doinst.sh" fi rm -rf "$PKG_PATH/DEBIAN" if [ -f "/usr/bin/pigz" ]; then diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index d10a3e9ca14..3c0c0781de6 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -18,11 +18,10 @@ option (ENABLE_CLICKHOUSE_SERVER "Server mode (main mode)" ${ENABLE_CLICKHOUSE_A option (ENABLE_CLICKHOUSE_CLIENT "Client mode (interactive tui/shell that connects to the server)" ${ENABLE_CLICKHOUSE_ALL}) -if (SPLIT_SHARED_LIBRARIES) - # Don't create self-extracting clickhouse for split build - option (ENABLE_CLICKHOUSE_SELF_EXTRACTING "Self-extracting executable" OFF) -else () - option (ENABLE_CLICKHOUSE_SELF_EXTRACTING "Self-extracting executable" ON) +# Don't create self-extracting clickhouse for split build +if (ENABLE_CLICKHOUSE_SELF_EXTRACTING AND SPLIT_SHARED_LIBRARIES) + message (STATUS "Self-extracting on split build is not supported") + unset (ENABLE_CLICKHOUSE_SELF_EXTRACTING CACHE) endif () # https://clickhouse.com/docs/en/operations/utilities/clickhouse-local/ diff --git a/programs/bash-completion/completions/clickhouse-bootstrap b/programs/bash-completion/completions/clickhouse-bootstrap index 8684f122503..45a7502919d 100644 --- a/programs/bash-completion/completions/clickhouse-bootstrap +++ b/programs/bash-completion/completions/clickhouse-bootstrap @@ -40,67 +40,98 @@ CLICKHOUSE_QueryKind=( no_query ) +# SELECT name FROM system.formats ORDER BY name FORMAT LineAsString CLICKHOUSE_Format=( - CapnProto - PostgreSQLWire - MySQLWire - JSONStringsEachRowWithProgress - JSONEachRowWithProgress - JSONCompact - JSON - CSV - Vertical - ODBCDriver2 - PrettySpaceNoEscapes - Pretty - JSONCompactStrings - PrettyNoEscapes - ArrowStream - TabSeparatedWithNames - Parquet Arrow - PrettyCompact - AvroConfluent - ORC - PrettyCompactNoEscapes - RawBLOB - Template - MsgPack - JSONCompactEachRow - CustomSeparated - TemplateIgnoreSpaces - Markdown - XML - ProtobufSingle - JSONCompactStringsEachRowWithNamesAndTypes - TSKV - TabSeparated - JSONStringEachRow - JSONStringsEachRow - TSVRaw - Values - TabSeparatedWithNamesAndTypes - PrettyCompactMonoBlock - TSVWithNamesAndTypes + ArrowStream Avro - RowBinaryWithNamesAndTypes - LineAsString - Native - JSONCompactEachRowWithNamesAndTypes - PrettySpace - Regexp - TSV - JSONEachRow - CustomSeparatedIgnoreSpaces + AvroConfluent + CSV CSVWithNames - JSONStrings - Null - TabSeparatedRaw - TSVWithNames - Protobuf - RowBinary + CSVWithNamesAndTypes + CapnProto + CustomSeparated + CustomSeparatedIgnoreSpaces + CustomSeparatedIgnoreSpacesWithNames + CustomSeparatedIgnoreSpacesWithNamesAndTypes + CustomSeparatedWithNames + CustomSeparatedWithNamesAndTypes + HiveText + JSON + JSONAsObject JSONAsString + JSONColumns + JSONColumnsWithMetadata + JSONCompact + JSONCompactColumns + JSONCompactEachRow + JSONCompactEachRowWithNames + 
JSONCompactEachRowWithNamesAndTypes + JSONCompactStrings JSONCompactStringsEachRow + JSONCompactStringsEachRowWithNames + JSONCompactStringsEachRowWithNamesAndTypes + JSONEachRow + JSONEachRowWithProgress + JSONLines + JSONStringEachRow + JSONStrings + JSONStringsEachRow + JSONStringsEachRowWithProgress + LineAsString + LineAsStringWithNames + LineAsStringWithNamesAndTypes + Markdown + MsgPack + MySQLDump + MySQLWire + NDJSON + Native + Null + ODBCDriver2 + ORC + Parquet + PostgreSQLWire + Pretty + PrettyCompact + PrettyCompactMonoBlock + PrettyCompactNoEscapes + PrettyCompactNoEscapesMonoBlock + PrettyMonoBlock + PrettyNoEscapes + PrettyNoEscapesMonoBlock + PrettySpace + PrettySpaceMonoBlock + PrettySpaceNoEscapes + PrettySpaceNoEscapesMonoBlock + Prometheus + Protobuf + ProtobufList + ProtobufSingle + RawBLOB + Regexp + RowBinary + RowBinaryWithNames + RowBinaryWithNamesAndTypes + SQLInsert + TSKV + TSV + TSVRaw + TSVRawWithNames + TSVRawWithNamesAndTypes + TSVWithNames + TSVWithNamesAndTypes + TabSeparated + TabSeparatedRaw + TabSeparatedRawWithNames + TabSeparatedRawWithNamesAndTypes + TabSeparatedWithNames + TabSeparatedWithNamesAndTypes + Template + TemplateIgnoreSpaces + Values + Vertical + XML ) function _clickhouse_bin_exist() diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index fcfce7bc2a3..4d55a67f0f8 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -120,7 +120,7 @@ public: void initialize(Poco::Util::Application & self [[maybe_unused]]) override { std::string home_path; - const char * home_path_cstr = getenv("HOME"); + const char * home_path_cstr = getenv("HOME"); // NOLINT(concurrency-mt-unsafe) if (home_path_cstr) home_path = home_path_cstr; @@ -613,15 +613,15 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv) std::optional env_password_str; std::optional env_quota_key_str; - const char * env_user = getenv("CLICKHOUSE_USER"); + const char * env_user = getenv("CLICKHOUSE_USER"); // NOLINT(concurrency-mt-unsafe) if (env_user != nullptr) env_user_str.emplace(std::string(env_user)); - const char * env_password = getenv("CLICKHOUSE_PASSWORD"); + const char * env_password = getenv("CLICKHOUSE_PASSWORD"); // NOLINT(concurrency-mt-unsafe) if (env_password != nullptr) env_password_str.emplace(std::string(env_password)); - const char * env_quota_key = getenv("CLICKHOUSE_QUOTA_KEY"); + const char * env_quota_key = getenv("CLICKHOUSE_QUOTA_KEY"); // NOLINT(concurrency-mt-unsafe) if (env_quota_key != nullptr) env_quota_key_str.emplace(std::string(env_quota_key)); diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 9b1dbbe221a..e27845de184 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -183,7 +183,7 @@ void Client::initialize(Poco::Util::Application & self) { Poco::Util::Application::initialize(self); - const char * home_path_cstr = getenv("HOME"); + const char * home_path_cstr = getenv("HOME"); // NOLINT(concurrency-mt-unsafe) if (home_path_cstr) home_path = home_path_cstr; @@ -202,11 +202,11 @@ void Client::initialize(Poco::Util::Application & self) * may be statically allocated, and can be modified by a subsequent call to getenv(), putenv(3), setenv(3), or unsetenv(3). 
*/ - const char * env_user = getenv("CLICKHOUSE_USER"); + const char * env_user = getenv("CLICKHOUSE_USER"); // NOLINT(concurrency-mt-unsafe) if (env_user) config().setString("user", env_user); - const char * env_password = getenv("CLICKHOUSE_PASSWORD"); + const char * env_password = getenv("CLICKHOUSE_PASSWORD"); // NOLINT(concurrency-mt-unsafe) if (env_password) config().setString("password", env_password); @@ -620,7 +620,7 @@ bool Client::processWithFuzzing(const String & full_query) stderr, "Found error: IAST::clone() is broken for some AST node. This is a bug. The original AST ('dump before fuzz') and its cloned copy ('dump of cloned AST') refer to the same nodes, which must never happen. This means that their parent node doesn't implement clone() correctly."); - exit(1); + _exit(1); } auto fuzzed_text = ast_to_process->formatForErrorMessage(); @@ -770,7 +770,7 @@ bool Client::processWithFuzzing(const String & full_query) fmt::print(stderr, "Text-3 (AST-3 formatted):\n'{}'\n", text_3); fmt::print(stderr, "Text-3 must be equal to Text-2, but it is not.\n"); - exit(1); + _exit(1); } } } @@ -909,7 +909,7 @@ void Client::processOptions(const OptionsDescription & options_description, auto exit_code = e.code() % 256; if (exit_code == 0) exit_code = 255; - exit(exit_code); + _exit(exit_code); } } diff --git a/programs/disks/DisksApp.cpp b/programs/disks/DisksApp.cpp index 58a18f6ad2e..08768386808 100644 --- a/programs/disks/DisksApp.cpp +++ b/programs/disks/DisksApp.cpp @@ -110,7 +110,7 @@ void DisksApp::init(std::vector & common_arguments) if (options.count("help")) { printHelpMessage(options_description); - exit(0); + exit(0); // NOLINT(concurrency-mt-unsafe) } if (!supported_commands.contains(command_name)) diff --git a/programs/install/Install.cpp b/programs/install/Install.cpp index 45c7c9a912e..297e2d24c07 100644 --- a/programs/install/Install.cpp +++ b/programs/install/Install.cpp @@ -708,7 +708,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv) /// dpkg or apt installers can ask for non-interactive work explicitly. 
- const char * debian_frontend_var = getenv("DEBIAN_FRONTEND"); + const char * debian_frontend_var = getenv("DEBIAN_FRONTEND"); // NOLINT(concurrency-mt-unsafe) bool noninteractive = debian_frontend_var && debian_frontend_var == std::string_view("noninteractive"); bool is_interactive = !noninteractive && stdin_is_a_tty && stdout_is_a_tty; diff --git a/programs/local/CMakeLists.txt b/programs/local/CMakeLists.txt index da466f725b3..ad4406156c4 100644 --- a/programs/local/CMakeLists.txt +++ b/programs/local/CMakeLists.txt @@ -18,6 +18,9 @@ if(NOT CLICKHOUSE_ONE_SHARED) target_link_libraries(clickhouse-local-lib PRIVATE clickhouse-server-lib) endif() +# Always use internal readpassphrase +target_link_libraries(clickhouse-local-lib PRIVATE readpassphrase) + if (ENABLE_FUZZING) add_compile_definitions(FUZZING_MODE=1) set (WITH_COVERAGE ON) diff --git a/programs/main.cpp b/programs/main.cpp index 27e17cfd2e0..fef0ad688e2 100644 --- a/programs/main.cpp +++ b/programs/main.cpp @@ -366,10 +366,10 @@ void checkHarmfulEnvironmentVariables(char ** argv) bool require_reexec = false; for (const auto * var : harmful_env_variables) { - if (const char * value = getenv(var); value && value[0]) + if (const char * value = getenv(var); value && value[0]) // NOLINT(concurrency-mt-unsafe) { /// NOTE: setenv() is used over unsetenv() since unsetenv() marked as harmful - if (setenv(var, "", true)) + if (setenv(var, "", true)) // NOLINT(concurrency-mt-unsafe) // this is safe if not called concurrently { fmt::print(stderr, "Cannot override {} environment variable", var); _exit(1); diff --git a/programs/server/CMakeLists.txt b/programs/server/CMakeLists.txt index 643fd2f0ec4..2cfa748d585 100644 --- a/programs/server/CMakeLists.txt +++ b/programs/server/CMakeLists.txt @@ -34,6 +34,6 @@ install(FILES config.xml users.xml DESTINATION "${CLICKHOUSE_ETC_DIR}/clickhouse clickhouse_embed_binaries( TARGET clickhouse_server_configs - RESOURCES config.xml users.xml embedded.xml play.html + RESOURCES config.xml users.xml embedded.xml play.html dashboard.html js/uplot.js ) add_dependencies(clickhouse-server-lib clickhouse_server_configs) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 39d7b1d0e5b..b621a897035 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -4,18 +4,14 @@ #include #include #include -#include -#include #include #include #include -#include #include #include #include #include #include -#include #include #include #include @@ -45,7 +41,6 @@ #include #include #include -#include #include #include #include @@ -84,7 +79,6 @@ #include #include #include -#include #include #include #include @@ -170,7 +164,7 @@ int mainEntryClickHouseServer(int argc, char ** argv) /// Can be overridden by environment variable (cannot use server config at this moment). 
if (argc > 0) { - const char * env_watchdog = getenv("CLICKHOUSE_WATCHDOG_ENABLE"); + const char * env_watchdog = getenv("CLICKHOUSE_WATCHDOG_ENABLE"); // NOLINT(concurrency-mt-unsafe) if (env_watchdog) { if (0 == strcmp(env_watchdog, "1")) @@ -268,7 +262,6 @@ namespace ErrorCodes extern const int ARGUMENT_OUT_OF_BOUND; extern const int EXCESSIVE_ELEMENT_IN_CONFIG; extern const int INVALID_CONFIG_PARAMETER; - extern const int SYSTEM_ERROR; extern const int FAILED_TO_GETPWUID; extern const int MISMATCHING_USERS_FOR_PROCESS_AND_DATA; extern const int NETWORK_ERROR; @@ -658,6 +651,24 @@ int Server::main(const std::vector & /*args*/) StackTrace::setShowAddresses(config().getBool("show_addresses_in_stack_traces", true)); +#if USE_HDFS + /// This will point libhdfs3 to the right location for its config. + /// Note: this has to be done once at server initialization, because 'setenv' is not thread-safe. + + String libhdfs3_conf = config().getString("hdfs.libhdfs3_conf", ""); + if (!libhdfs3_conf.empty()) + { + if (std::filesystem::path{libhdfs3_conf}.is_relative() && !std::filesystem::exists(libhdfs3_conf)) + { + const String config_path = config().getString("config-file", "config.xml"); + const auto config_dir = std::filesystem::path{config_path}.remove_filename(); + if (std::filesystem::exists(config_dir / libhdfs3_conf)) + libhdfs3_conf = std::filesystem::absolute(config_dir / libhdfs3_conf); + } + setenv("LIBHDFS3_CONF", libhdfs3_conf.c_str(), true /* overwrite */); // NOLINT + } +#endif + registerFunctions(); registerAggregateFunctions(); registerTableFunctions(); @@ -698,8 +709,7 @@ int Server::main(const std::vector & /*args*/) GlobalThreadPool::initialize( config().getUInt("max_thread_pool_size", 10000), config().getUInt("max_thread_pool_free_size", 1000), - config().getUInt("thread_pool_queue_size", 10000) - ); + config().getUInt("thread_pool_queue_size", 10000)); IOThreadPool::initialize( config().getUInt("max_io_thread_pool_size", 100), @@ -840,7 +850,7 @@ int Server::main(const std::vector & /*args*/) LOG_TRACE(log, "Will do mlock to prevent executable memory from being paged out. It may take a few seconds."); if (0 != mlock(addr, len)) - LOG_WARNING(log, "Failed mlock: {}", errnoToString(ErrorCodes::SYSTEM_ERROR)); + LOG_WARNING(log, "Failed mlock: {}", errnoToString()); else LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed, total {}", ReadableSize(len)); } @@ -908,7 +918,7 @@ int Server::main(const std::vector & /*args*/) rlim.rlim_cur = config().getUInt("max_open_files", rlim.rlim_max); int rc = setrlimit(RLIMIT_NOFILE, &rlim); if (rc != 0) - LOG_WARNING(log, "Cannot set max number of file descriptors to {}. Try to specify max_open_files according to your system limits. error: {}", rlim.rlim_cur, strerror(errno)); + LOG_WARNING(log, "Cannot set max number of file descriptors to {}. Try to specify max_open_files according to your system limits. error: {}", rlim.rlim_cur, errnoToString()); else LOG_DEBUG(log, "Set max number of file descriptors to {} (was {}).", rlim.rlim_cur, old); } @@ -931,7 +941,7 @@ int Server::main(const std::vector & /*args*/) int rc = setrlimit(RLIMIT_NPROC, &rlim); if (rc != 0) { - LOG_WARNING(log, "Cannot set max number of threads to {}. error: {}", rlim.rlim_cur, strerror(errno)); + LOG_WARNING(log, "Cannot set max number of threads to {}. 
error: {}", rlim.rlim_cur, errnoToString()); rlim.rlim_cur = old; } else @@ -1156,22 +1166,20 @@ int Server::main(const std::vector & /*args*/) if (config->has("max_partition_size_to_drop")) global_context->setMaxPartitionSizeToDrop(config->getUInt64("max_partition_size_to_drop")); - if (config->has("concurrent_threads_soft_limit")) + ConcurrencyControl::SlotCount concurrent_threads_soft_limit = ConcurrencyControl::Unlimited; + if (config->has("concurrent_threads_soft_limit_num")) { - auto concurrent_threads_soft_limit = config->getInt("concurrent_threads_soft_limit", 0); - if (concurrent_threads_soft_limit == -1) - { - // Based on tests concurrent_threads_soft_limit has an optimal value when it's about 3 times of logical CPU cores - constexpr size_t thread_factor = 3; - concurrent_threads_soft_limit = std::thread::hardware_concurrency() * thread_factor; - } - if (concurrent_threads_soft_limit) - ConcurrencyControl::instance().setMaxConcurrency(concurrent_threads_soft_limit); - else - ConcurrencyControl::instance().setMaxConcurrency(ConcurrencyControl::Unlimited); + auto value = config->getUInt64("concurrent_threads_soft_limit_num", 0); + if (value > 0 && value < concurrent_threads_soft_limit) + concurrent_threads_soft_limit = value; } - else - ConcurrencyControl::instance().setMaxConcurrency(ConcurrencyControl::Unlimited); + if (config->has("concurrent_threads_soft_limit_ratio_to_cores")) + { + auto value = config->getUInt64("concurrent_threads_soft_limit_ratio_to_cores", 0) * std::thread::hardware_concurrency(); + if (value > 0 && value < concurrent_threads_soft_limit) + concurrent_threads_soft_limit = value; + } + ConcurrencyControl::instance().setMaxConcurrency(concurrent_threads_soft_limit); if (config->has("max_concurrent_queries")) global_context->getProcessList().setMaxSize(config->getInt("max_concurrent_queries", 0)); diff --git a/programs/server/config.d/ext-en.txt b/programs/server/config.d/ext-en.txt new file mode 120000 index 00000000000..6bc78ab238a --- /dev/null +++ b/programs/server/config.d/ext-en.txt @@ -0,0 +1 @@ +../../../tests/config/ext-en.txt \ No newline at end of file diff --git a/programs/server/config.d/ext-ru.txt b/programs/server/config.d/ext-ru.txt new file mode 120000 index 00000000000..63ea415a66e --- /dev/null +++ b/programs/server/config.d/ext-ru.txt @@ -0,0 +1 @@ +../../../tests/config/ext-ru.txt \ No newline at end of file diff --git a/programs/server/config.d/lem-en.bin b/programs/server/config.d/lem-en.bin new file mode 120000 index 00000000000..d2c960cf013 --- /dev/null +++ b/programs/server/config.d/lem-en.bin @@ -0,0 +1 @@ +../../../tests/config/lem-en.bin \ No newline at end of file diff --git a/tests/integration/test_nlp/configs/dicts_config.xml b/programs/server/config.d/nlp.xml similarity index 65% rename from tests/integration/test_nlp/configs/dicts_config.xml rename to programs/server/config.d/nlp.xml index 8c05ea67e49..17b11741fbd 100644 --- a/tests/integration/test_nlp/configs/dicts_config.xml +++ b/programs/server/config.d/nlp.xml @@ -4,19 +4,19 @@ en plain - /etc/clickhouse-server/dictionaries/ext-en.txt + config.d/ext-en.txt ru plain - /etc/clickhouse-server/dictionaries/ext-ru.txt + config.d/ext-ru.txt en - /etc/clickhouse-server/dictionaries/lem-en.bin + config.d/lem-en.bin diff --git a/programs/server/config.xml b/programs/server/config.xml index ad82b6b9c6f..2ce3fe7754f 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -281,12 +281,12 @@
diff --git a/programs/server/config.xml b/programs/server/config.xml index ad82b6b9c6f..2ce3fe7754f 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -281,12 +281,12 @@
         ]]>
     -->

-
-    <concurrent_threads_soft_limit>0</concurrent_threads_soft_limit>
+    <concurrent_threads_soft_limit_num>0</concurrent_threads_soft_limit_num>
+    <concurrent_threads_soft_limit_ratio_to_cores>0</concurrent_threads_soft_limit_ratio_to_cores>
     <max_concurrent_queries>100</max_concurrent_queries>
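Taken together, the Server.cpp hunk above and this config.xml hunk replace the single `concurrent_threads_soft_limit` setting with two: an absolute cap (`..._num`) and a cap expressed as a multiple of logical cores (`..._ratio_to_cores`), with the tightest configured bound winning. A minimal standalone sketch of that combination logic; the globals and the unlimited sentinel are illustrative stand-ins, not ClickHouse's actual API:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>
#include <thread>

// Hypothetical stand-ins for config.xml lookups; the server reads these via
// its configuration object, not via globals.
std::optional<uint64_t> soft_limit_num;             // <concurrent_threads_soft_limit_num>
std::optional<uint64_t> soft_limit_ratio_to_cores;  // <concurrent_threads_soft_limit_ratio_to_cores>

// 0 (or absence) means "no bound from this setting"; the smallest positive
// bound wins, mirroring the Server.cpp hunk above.
uint64_t effectiveSoftLimit()
{
    uint64_t limit = std::numeric_limits<uint64_t>::max(); // stands in for ConcurrencyControl::Unlimited
    if (soft_limit_num && *soft_limit_num > 0)
        limit = std::min(limit, *soft_limit_num);
    if (soft_limit_ratio_to_cores && *soft_limit_ratio_to_cores > 0)
        limit = std::min<uint64_t>(limit, *soft_limit_ratio_to_cores * std::thread::hardware_concurrency());
    return limit;
}

int main()
{
    soft_limit_num = 0;             // present but 0 -> ignored, as in the shipped config.xml
    soft_limit_ratio_to_cores = 3;  // e.g. with 8 logical cores -> soft limit of 24 query threads
    std::cout << effectiveSoftLimit() << '\n';
}
```

Because the minimum is taken, setting both values is safe: whichever bound is lower on a given machine is the one that applies.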
diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html new file mode 100644 index 00000000000..e63a277497a --- /dev/null +++ b/programs/server/dashboard.html @@ -0,0 +1,905 @@
+[... 905 added lines: the self-contained "ClickHouse Dashboard" HTML/JS page (page title "ClickHouse Dashboard", chart area, 🌚/🌞 theme toggle); the markup itself was lost in extraction and is not reproduced here ...]
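Several hunks above, Client.cpp switches its abnormal-termination paths in the fuzzer from `exit` to `_exit`. The difference is that `_exit` terminates the process immediately, without running `atexit` handlers or static destructors, which might touch state that is already known to be corrupt. A tiny runnable demonstration of that difference (not ClickHouse code):

```cpp
#include <cstdio>
#include <cstdlib>
#include <unistd.h>

// A global with a destructor, standing in for the client's static state.
struct Noisy
{
    ~Noisy() { std::puts("~Noisy: static destructor ran"); }
} noisy;

int main(int argc, char **)
{
    if (argc > 1)
        std::exit(1); // runs atexit handlers and static destructors first: "~Noisy" is printed
    _exit(1);         // terminates immediately: "~Noisy" is never printed
}
```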
diff --git a/programs/server/js/uplot.js b/programs/server/js/uplot.js new file mode 100644 index 00000000000..657479be7e1 --- /dev/null +++ b/programs/server/js/uplot.js @@ -0,0 +1,2 @@
+/*! https://github.com/leeoniya/uPlot (v1.6.21) */
+[... second added line: the vendored uPlot v1.6.21 bundle, minified onto a single line; omitted here as machine-generated and not human-readable ...]
ti(e,t,l){e in ei&&ei[e].forEach((e=>{e.call(null,k,t,l)}))}(u.plugins||[]).forEach((e=>{for(let t in e.hooks)ei[t]=(ei[t]||[]).concat(e.hooks[t])}));const li=Fe({key:null,setSeries:!1,filters:{pub:_e,sub:_e},scales:[ve,re[1]?re[1].scale:null],match:[ke,ke],values:[null,null]},il.sync);il.sync=li;const ni=li.key,ii=el(ni);function oi(e,t,l,n,i,o,s){li.filters.pub(e,t,l,n,i,o,s)&&ii.pub(e,t,l,n,i,o,s)}function si(){ti("init",u,m),Ul(m||u.data,!1),Oe[ve]?bn(ve,Oe[ve]):Vl(),tl(u.width,u.height),Nn(null,!0,!1),zn(En,!1)}return ii.sub(k),k.pub=function(e,t,l,n,i,o,s){li.filters.sub(e,t,l,n,i,o,s)&&Qn[e](null,t,l,n,i,o,s)},k.destroy=function(){ii.unsub(k),Sl.delete(k),at.clear(),R(g,_,Xn),L.remove(),ti("destroy")},re.forEach(al),ae.forEach((function(e,t){if(e._show=e.show,e.show){let l=e.side%2,n=he[e.scale];null==n&&(e.scale=l?re[1].scale:ve,n=he[e.scale]);let i=n.time;e.size=me(e.size),e.space=me(e.space),e.rotate=me(e.rotate),e.incrs=me(e.incrs||(2==n.distr?Xe:i?1==te?ct:pt:Qe)),e.splits=me(e.splits||(i&&1==n.distr?Be:3==n.distr?Rt:4==n.distr?Gt:Ht)),e.stroke=me(e.stroke),e.grid.stroke=me(e.grid.stroke),e.ticks.stroke=me(e.ticks.stroke),e.border.stroke=me(e.border.stroke);let o=e.values;e.values=Pe(o)&&!Pe(o[0])?me(o):i?Pe(o)?wt(Ne,xt(o,je)):Ae(o)?function(e,t){let l=Ve(t);return(t,n)=>n.map((t=>l(e(t))))}(Ne,o):o||Ue:o||Ft,e.filter=me(e.filter||(3>n.distr?xe:jt)),e.font=Ll(e.font),e.labelFont=Ll(e.labelFont),e._size=e.size(k,null,t,0),e._space=e._rotate=e._incrs=e._found=e._splits=e._values=null,e._size>0&&(fl[t]=!0,e._el=D("u-axis",U))}})),x?x instanceof HTMLElement?(x.appendChild(L),si()):x(k,si):si(),k}Ol.assign=Fe,Ol.fmtNum=$,Ol.rangeNum=J,Ol.rangeLog=N,Ol.rangeAsinh=j,Ol.orient=tl,Ol.pxRatio=b,Ol.join=function(e,t){let l=new Set;for(let t=0;e.length>t;t++){let n=e[t][0],i=n.length;for(let e=0;i>e;e++)l.add(n[e])}let n=[Array.from(l).sort(((e,t)=>e-t))],i=n[0].length,o=new Map;for(let e=0;i>e;e++)o.set(n[0][e],e);for(let l=0;e.length>l;l++){let s=e[l],r=s[0];for(let e=1;s.length>e;e++){let u=s[e],a=Array(i).fill(void 0),f=t?t[l][e]:1,c=[];for(let e=0;u.length>e;e++){let t=u[e],l=o.get(r[e]);null===t?0!=f&&(a[l]=t,2==f&&c.push(l)):a[l]=t}He(a,c,i),n.push(a)}}return n},Ol.fmtDate=Ve,Ol.tzDate=function(e,t){let l;return"UTC"==t||"Etc/UTC"==t?l=new Date(+e+6e4*e.getTimezoneOffset()):t==Je?l=e:(l=new Date(e.toLocaleString("en-US",{timeZone:t})),l.setMilliseconds(e.getMilliseconds())),l},Ol.sync=el;{Ol.addGap=function(e,t,l){let n=e[e.length-1];n&&n[0]==t?n[1]=l:e.push([t,l])},Ol.clipGaps=ol;let e=Ol.paths={points:_l};e.linear=yl,e.stepped=function(e){const t=q(e.align,1),l=q(e.ascDesc,!1),n=q(e.alignGaps,0);return(e,i,o,s)=>tl(e,i,((r,u,a,f,c,h,d,p,m,g,x)=>{let w=r.pxRound,_=e=>w(h(e,f,g,p)),k=e=>w(d(e,c,x,m)),v=0==f.ori?cl:hl;const y={stroke:new Path2D,fill:null,clip:null,band:null,gaps:null,flags:1},M=y.stroke,S=f.dir*(0==f.ori?1:-1);o=L(a,o,s,1),s=L(a,o,s,-1);let E=k(a[1==S?o:s]),D=_(u[1==S?o:s]),z=D;v(M,D,E);for(let e=1==S?o:s;e>=o&&s>=e;e+=S){let l=a[e];if(null==l)continue;let n=_(u[e]),i=k(l);1==t?v(M,n,E):v(M,z,i),v(M,n,i),E=i,z=n}let[T,P]=ll(e,i);if(null!=r.fill||0!=T){let t=y.fill=new Path2D(M),l=k(r.fillTo(e,i,r.min,r.max,T));v(t,z,l),v(t,D,l)}if(!r.spanGaps){let c=[];c.push(...sl(u,a,o,s,S,_,n));let h=r.width*b/2,d=l||1==t?h:-h,w=l||-1==t?-h:h;c.forEach((e=>{e[0]+=d,e[1]+=w})),y.gaps=c=r.gaps(e,i,o,s,c),y.clip=ol(c,f.ori,p,m,g,x)}return 0!=P&&(y.band=2==P?[il(e,i,o,s,M,-1),il(e,i,o,s,M,1)]:il(e,i,o,s,M,P)),y}))},e.bars=function(e){const 
t=q((e=e||De).size,[.6,ce,1]),l=e.align||0,n=(e.gap||0)*b,i=q(e.radius,0),o=1-t[0],s=q(t[1],ce)*b,r=q(t[2],1)*b,u=q(e.disp,De),a=q(e.each,(()=>{})),{fill:f,stroke:c}=u;return(e,t,h,d)=>tl(e,t,((p,m,g,x,w,_,k,v,y,M,S)=>{let E=p.pxRound;const D=x.dir*(0==x.ori?1:-1),z=w.dir*(1==w.ori?1:-1);let T,P,A=0==x.ori?dl:pl,W=0==x.ori?a:(e,t,l,n,i,o,s)=>{a(e,t,l,i,n,s,o)},[Y,C]=ll(e,t),F=3==w.distr?1==Y?w.max:w.min:0,H=k(F,w,S,y),R=E(p.width*b),G=!1,L=null,I=null,O=null,N=null;null==f||0!=R&&null==c||(G=!0,L=f.values(e,t,h,d),I=new Map,new Set(L).forEach((e=>{null!=e&&I.set(e,new Path2D)})),R>0&&(O=c.values(e,t,h,d),N=new Map,new Set(O).forEach((e=>{null!=e&&N.set(e,new Path2D)}))));let{x0:j,size:B}=u;if(null!=j&&null!=B){m=j.values(e,t,h,d),2==j.unit&&(m=m.map((t=>e.posToVal(v+t*M,x.key,!0))));let l=B.values(e,t,h,d);P=2==B.unit?l[0]*M:_(l[0],x,M,v)-_(0,x,M,v),P=E(P-R),T=1==D?-R/2:P+R/2}else{let e=M;if(m.length>1){let t=null;for(let l=0,n=1/0;m.length>l;l++)if(void 0!==g[l]){if(null!=t){let i=ee(m[l]-m[t]);n>i&&(n=i,e=ee(_(m[l],x,M,v)-_(m[t],x,M,v)))}t=l}}P=E(ie(s,oe(r,e-e*o))-R-n),T=(0==l?P/2:l==D?0:P)-l*D*n/2}const U={stroke:null,fill:null,clip:null,band:null,gaps:null,flags:3};let V;0!=C&&(U.band=new Path2D,V=E(k(1==C?w.max:w.min,w,S,y)));const J=G?null:new Path2D,K=U.band;let{y0:Z,y1:$}=u,X=null;null!=Z&&null!=$&&(g=$.values(e,t,h,d),X=Z.values(e,t,h,d));for(let l=1==D?h:d;l>=h&&d>=l;l+=D){let n=g[l],o=_(2!=x.distr||null!=u?m[l]:l,x,M,v),s=k(q(n,F),w,S,y);null!=X&&null!=n&&(H=k(X[l],w,S,y));let r=E(o-T),a=E(oe(s,H)),f=E(ie(s,H)),c=a-f,h=i*P;null!=n&&(G?(R>0&&null!=O[l]&&A(N.get(O[l]),r,f+te(R/2),P,oe(0,c-R),h),null!=L[l]&&A(I.get(L[l]),r,f+te(R/2),P,oe(0,c-R),h)):A(J,r,f+te(R/2),P,oe(0,c-R),h),W(e,t,l,r-R/2,f,P+R,c)),0!=C&&(z*C==1?(a=f,f=V):(f=a,a=V),c=a-f,A(K,r-R/2,f,P+R,oe(0,c),0))}return R>0&&(U.stroke=G?N:J),U.fill=G?I:J,U}))},e.spline=function(e){return function(e,t){const l=q(t?.alignGaps,0);return(t,n,i,o)=>tl(t,n,((s,r,u,a,f,c,h,d,p,m,g)=>{let x,w,_,k=s.pxRound,b=e=>k(c(e,a,m,d)),v=e=>k(h(e,f,g,p));0==a.ori?(x=al,_=cl,w=xl):(x=fl,_=hl,w=wl);const y=a.dir*(0==a.ori?1:-1);i=L(u,i,o,1),o=L(u,i,o,-1);let M=b(r[1==y?i:o]),S=M,E=[],D=[];for(let e=1==y?i:o;e>=i&&o>=e;e+=y)if(null!=u[e]){let t=b(r[e]);E.push(S=t),D.push(v(u[e]))}const z={stroke:e(E,D,x,_,w,k),fill:null,clip:null,band:null,gaps:null,flags:1},T=z.stroke;let[P,A]=ll(t,n);if(null!=s.fill||0!=P){let e=z.fill=new Path2D(T),l=v(s.fillTo(t,n,s.min,s.max,P));_(e,S,l),_(e,M,l)}if(!s.spanGaps){let e=[];e.push(...sl(r,u,i,o,y,b,l)),z.gaps=e=s.gaps(t,n,i,o,e),z.clip=ol(e,a.ori,d,p,m,g)}return 0!=A&&(z.band=2==A?[il(t,n,i,o,T,-1),il(t,n,i,o,T,1)]:il(t,n,i,o,T,A)),z}))}(Ml,e)}}return Ol}(); diff --git a/programs/static-files-disk-uploader/static-files-disk-uploader.cpp b/programs/static-files-disk-uploader/static-files-disk-uploader.cpp index 07c066b0d59..786ae1f559f 100644 --- a/programs/static-files-disk-uploader/static-files-disk-uploader.cpp +++ b/programs/static-files-disk-uploader/static-files-disk-uploader.cpp @@ -160,7 +160,7 @@ try if (options.empty() || options.count("help")) { std::cout << description << std::endl; - exit(0); + exit(0); // NOLINT(concurrency-mt-unsafe) } String metadata_path; diff --git a/programs/su/su.cpp b/programs/su/su.cpp index 0979abf353d..95e7a0b709c 100644 --- a/programs/su/su.cpp +++ b/programs/su/su.cpp @@ -108,7 +108,7 @@ try if (argc < 3) { std::cout << "Usage: ./clickhouse su user:group ..." 
<< std::endl; - exit(0); + exit(0); // NOLINT(concurrency-mt-unsafe) } std::string_view user_and_group = argv[1]; diff --git a/src/AggregateFunctions/AggregateFunctionFactory.cpp b/src/AggregateFunctions/AggregateFunctionFactory.cpp index 766d8af0d8c..a8385ad8b59 100644 --- a/src/AggregateFunctions/AggregateFunctionFactory.cpp +++ b/src/AggregateFunctions/AggregateFunctionFactory.cpp @@ -176,11 +176,6 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl( /// storage stores AggregateFunction(uniqCombinedIf) and in SELECT you /// need to filter aggregation result based on another column. -#if defined(UNBUNDLED) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wstringop-overread" -#endif - if (!combinator->supportsNesting() && nested_name.ends_with(combinator_name)) { throw Exception(ErrorCodes::ILLEGAL_AGGREGATION, @@ -188,10 +183,6 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl( combinator_name); } -#if defined(UNBUNDLED) -#pragma GCC diagnostic pop -#endif - DataTypes nested_types = combinator->transformArguments(argument_types); Array nested_parameters = combinator->transformParameters(parameters); diff --git a/src/Backups/BackupEntryFromAppendOnlyFile.cpp b/src/Backups/BackupEntryFromAppendOnlyFile.cpp index fa816091bdf..910b6b6507e 100644 --- a/src/Backups/BackupEntryFromAppendOnlyFile.cpp +++ b/src/Backups/BackupEntryFromAppendOnlyFile.cpp @@ -5,16 +5,6 @@ namespace DB { -BackupEntryFromAppendOnlyFile::BackupEntryFromAppendOnlyFile( - const String & file_path_, - const std::optional<UInt64> & file_size_, - const std::optional<UInt128> & checksum_, - const std::shared_ptr<Poco::TemporaryFile> & temporary_file_) - : BackupEntryFromImmutableFile(file_path_, file_size_, checksum_, temporary_file_) , limit(BackupEntryFromImmutableFile::getSize()) -{ -} - BackupEntryFromAppendOnlyFile::BackupEntryFromAppendOnlyFile( const DiskPtr & disk_, const String & file_path_, diff --git a/src/Backups/BackupEntryFromAppendOnlyFile.h b/src/Backups/BackupEntryFromAppendOnlyFile.h index d868f82d45f..c6055b86268 100644 --- a/src/Backups/BackupEntryFromAppendOnlyFile.h +++ b/src/Backups/BackupEntryFromAppendOnlyFile.h @@ -11,13 +11,8 @@ namespace DB class BackupEntryFromAppendOnlyFile : public BackupEntryFromImmutableFile { public: - /// The constructor is allowed to not set `file_size_` or `checksum_`, in that case it will be calculated from the data. - explicit BackupEntryFromAppendOnlyFile( - const String & file_path_, - const std::optional<UInt64> & file_size_ = {}, - const std::optional<UInt128> & checksum_ = {}, - const std::shared_ptr<Poco::TemporaryFile> & temporary_file_ = {}); + /// The constructor is allowed to not set `file_size_` or `checksum_`, in that case it will be calculated from the data.
BackupEntryFromAppendOnlyFile( const DiskPtr & disk_, const String & file_path_, diff --git a/src/Backups/BackupEntryFromImmutableFile.cpp b/src/Backups/BackupEntryFromImmutableFile.cpp index 088324f364a..86b9c13fb9a 100644 --- a/src/Backups/BackupEntryFromImmutableFile.cpp +++ b/src/Backups/BackupEntryFromImmutableFile.cpp @@ -2,20 +2,12 @@ #include #include #include +#include namespace DB { -BackupEntryFromImmutableFile::BackupEntryFromImmutableFile( - const String & file_path_, - const std::optional<UInt64> & file_size_, - const std::optional<UInt128> & checksum_, - const std::shared_ptr<Poco::TemporaryFile> & temporary_file_) - : file_path(file_path_), file_size(file_size_), checksum(checksum_), temporary_file(temporary_file_) -{ -} - BackupEntryFromImmutableFile::BackupEntryFromImmutableFile( const DiskPtr & disk_, const String & file_path_, @@ -32,16 +24,24 @@ UInt64 BackupEntryFromImmutableFile::getSize() const { std::lock_guard lock{get_file_size_mutex}; if (!file_size) - file_size = disk ? disk->getFileSize(file_path) : Poco::File(file_path).getSize(); + file_size = disk->getFileSize(file_path); return *file_size; } std::unique_ptr<SeekableReadBuffer> BackupEntryFromImmutableFile::getReadBuffer() const { - if (disk) - return disk->readFile(file_path); - else - return createReadBufferFromFileBase(file_path, /* settings= */ {}); + return disk->readFile(file_path); +} + + +DataSourceDescription BackupEntryFromImmutableFile::getDataSourceDescription() const +{ + return disk->getDataSourceDescription(); +} + +String BackupEntryFromImmutableFile::getFilePath() const +{ + return file_path; } } diff --git a/src/Backups/BackupEntryFromImmutableFile.h b/src/Backups/BackupEntryFromImmutableFile.h index 5103518c873..99241c691cb 100644 --- a/src/Backups/BackupEntryFromImmutableFile.h +++ b/src/Backups/BackupEntryFromImmutableFile.h @@ -4,8 +4,6 @@ #include #include -namespace Poco { class TemporaryFile; } - namespace DB { class TemporaryFileOnDisk; @@ -16,13 +14,8 @@ using DiskPtr = std::shared_ptr<IDisk>; class BackupEntryFromImmutableFile : public IBackupEntry { public: - /// The constructor is allowed to not set `file_size_` or `checksum_`, in that case it will be calculated from the data. - explicit BackupEntryFromImmutableFile( - const String & file_path_, - const std::optional<UInt64> & file_size_ = {}, - const std::optional<UInt128> & checksum_ = {}, - const std::shared_ptr<Poco::TemporaryFile> & temporary_file_ = {}); + /// The constructor is allowed to not set `file_size_` or `checksum_`, in that case it will be calculated from the data.
BackupEntryFromImmutableFile( const DiskPtr & disk_, const String & file_path_, @@ -36,8 +29,10 @@ public: std::optional<UInt128> getChecksum() const override { return checksum; } std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override; - String getFilePath() const { return file_path; } - DiskPtr getDisk() const { return disk; } + String getFilePath() const override; + DataSourceDescription getDataSourceDescription() const override; + + DiskPtr tryGetDiskIfExists() const override { return disk; } private: const DiskPtr disk; @@ -45,7 +40,6 @@ private: mutable std::optional<UInt64> file_size TSA_GUARDED_BY(get_file_size_mutex); mutable std::mutex get_file_size_mutex; const std::optional<UInt128> checksum; - const std::shared_ptr<Poco::TemporaryFile> temporary_file; const std::shared_ptr<TemporaryFileOnDisk> temporary_file_on_disk; }; diff --git a/src/Backups/BackupEntryFromMemory.h b/src/Backups/BackupEntryFromMemory.h index 2226112c9c3..df3b9de40e3 100644 --- a/src/Backups/BackupEntryFromMemory.h +++ b/src/Backups/BackupEntryFromMemory.h @@ -19,6 +19,18 @@ public: std::optional<UInt128> getChecksum() const override { return checksum; } std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override; + String getFilePath() const override + { + return ""; + } + + DataSourceDescription getDataSourceDescription() const override + { + return DataSourceDescription{DataSourceType::RAM, "", false, false}; + } + + DiskPtr tryGetDiskIfExists() const override { return nullptr; } + private: const String data; const std::optional<UInt128> checksum; diff --git a/src/Backups/BackupEntryFromSmallFile.cpp b/src/Backups/BackupEntryFromSmallFile.cpp index ca13fcd8f1e..d24b3a6498d 100644 --- a/src/Backups/BackupEntryFromSmallFile.cpp +++ b/src/Backups/BackupEntryFromSmallFile.cpp @@ -36,4 +36,5 @@ BackupEntryFromSmallFile::BackupEntryFromSmallFile( : BackupEntryFromMemory(readFile(disk_, file_path_), checksum_), disk(disk_), file_path(file_path_) { } + } diff --git a/src/Backups/BackupEntryFromSmallFile.h b/src/Backups/BackupEntryFromSmallFile.h index 298c4fa8f77..99e319f07a0 100644 --- a/src/Backups/BackupEntryFromSmallFile.h +++ b/src/Backups/BackupEntryFromSmallFile.h @@ -23,9 +23,9 @@ public: const String & file_path_, const std::optional<UInt128> & checksum_ = {}); - String getFilePath() const { return file_path; } - DiskPtr getDisk() const { return disk; } + String getFilePath() const override { return file_path; } + DiskPtr tryGetDiskIfExists() const override { return disk; } private: const DiskPtr disk; const String file_path; diff --git a/src/Backups/BackupIO.cpp b/src/Backups/BackupIO.cpp new file mode 100644 index 00000000000..3641ab9ac7b --- /dev/null +++ b/src/Backups/BackupIO.cpp @@ -0,0 +1,27 @@ +#include + +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +void IBackupWriter::copyFileThroughBuffer(std::unique_ptr<SeekableReadBuffer> && source, const String & file_name) +{ + auto write_buffer = writeFile(file_name); + copyData(*source, *write_buffer); + write_buffer->finalize(); +} + +void IBackupWriter::copyFileNative(DiskPtr /* from_disk */, const String & /* file_name_from */, const String & /* file_name_to */) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Native copy not implemented for backup writer"); +} + +} diff --git a/src/Backups/BackupIO.h b/src/Backups/BackupIO.h index 389df97502a..8c400fca023 100644 --- a/src/Backups/BackupIO.h +++ b/src/Backups/BackupIO.h @@ -1,6 +1,8 @@ #pragma once #include +#include +#include namespace DB { @@ -15,6 +17,7 @@ public: virtual bool fileExists(const String & file_name) = 0; virtual UInt64 getFileSize(const String &
file_name) = 0; virtual std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) = 0; + virtual DataSourceDescription getDataSourceDescription() const = 0; }; /// Represents operations of storing to disk or uploading for writing a backup. @@ -27,6 +30,15 @@ public: virtual bool fileContentsEqual(const String & file_name, const String & expected_file_contents) = 0; virtual std::unique_ptr<WriteBuffer> writeFile(const String & file_name) = 0; virtual void removeFiles(const Strings & file_names) = 0; + virtual DataSourceDescription getDataSourceDescription() const = 0; + virtual void copyFileThroughBuffer(std::unique_ptr<SeekableReadBuffer> && source, const String & file_name); + + virtual bool supportNativeCopy(DataSourceDescription /* data_source_description */) const + { + return false; + } + + virtual void copyFileNative(DiskPtr from_disk, const String & file_name_from, const String & file_name_to); }; } diff --git a/src/Backups/BackupIO_Disk.cpp b/src/Backups/BackupIO_Disk.cpp index 22a86825387..4fcea94087c 100644 --- a/src/Backups/BackupIO_Disk.cpp +++ b/src/Backups/BackupIO_Disk.cpp @@ -6,6 +6,12 @@ namespace DB { + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + BackupReaderDisk::BackupReaderDisk(const DiskPtr & disk_, const String & path_) : disk(disk_), path(path_) { } @@ -77,4 +83,28 @@ void BackupWriterDisk::removeFiles(const Strings & file_names) disk->removeDirectory(path); } +DataSourceDescription BackupWriterDisk::getDataSourceDescription() const +{ + return disk->getDataSourceDescription(); +} + +DataSourceDescription BackupReaderDisk::getDataSourceDescription() const +{ + return disk->getDataSourceDescription(); +} + +bool BackupWriterDisk::supportNativeCopy(DataSourceDescription data_source_description) const +{ + return data_source_description == disk->getDataSourceDescription(); +} + +void BackupWriterDisk::copyFileNative(DiskPtr from_disk, const String & file_name_from, const String & file_name_to) +{ + if (!from_disk) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot natively copy data to disk without source disk"); + auto file_path = path / file_name_to; + disk->createDirectories(file_path.parent_path()); + from_disk->copyFile(file_name_from, *disk, file_path); +} + } diff --git a/src/Backups/BackupIO_Disk.h b/src/Backups/BackupIO_Disk.h index 53412e6d219..fd37691eeb7 100644 --- a/src/Backups/BackupIO_Disk.h +++ b/src/Backups/BackupIO_Disk.h @@ -17,6 +17,7 @@ public: bool fileExists(const String & file_name) override; UInt64 getFileSize(const String & file_name) override; std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override; + DataSourceDescription getDataSourceDescription() const override; private: DiskPtr disk; @@ -34,7 +35,11 @@ public: bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override; std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override; void removeFiles(const Strings & file_names) override; + DataSourceDescription getDataSourceDescription() const override; + bool supportNativeCopy(DataSourceDescription data_source_description) const override; + + void copyFileNative(DiskPtr from_disk, const String & file_name_from, const String & file_name_to) override; private: DiskPtr disk; std::filesystem::path path; diff --git a/src/Backups/BackupIO_File.cpp b/src/Backups/BackupIO_File.cpp index 7c08c150474..8c043d49ff8 100644 --- a/src/Backups/BackupIO_File.cpp +++ b/src/Backups/BackupIO_File.cpp @@ -1,6 +1,8 @@ #include #include #include +#include +#include namespace fs = std::filesystem; @@ -78,4 +80,55 @@ void
BackupWriterFile::removeFiles(const Strings & file_names) fs::remove(path); } +DataSourceDescription BackupWriterFile::getDataSourceDescription() const +{ + DataSourceDescription data_source_description; + + data_source_description.type = DataSourceType::Local; + + if (auto block_device_id = tryGetBlockDeviceId(path); block_device_id.has_value()) + data_source_description.description = *block_device_id; + else + data_source_description.description = path; + data_source_description.is_encrypted = false; + data_source_description.is_cached = false; + + return data_source_description; +} + +DataSourceDescription BackupReaderFile::getDataSourceDescription() const +{ + DataSourceDescription data_source_description; + + data_source_description.type = DataSourceType::Local; + + if (auto block_device_id = tryGetBlockDeviceId(path); block_device_id.has_value()) + data_source_description.description = *block_device_id; + else + data_source_description.description = path; + data_source_description.is_encrypted = false; + data_source_description.is_cached = false; + + return data_source_description; +} + + +bool BackupWriterFile::supportNativeCopy(DataSourceDescription data_source_description) const +{ + return data_source_description == getDataSourceDescription(); +} + +void BackupWriterFile::copyFileNative(DiskPtr from_disk, const String & file_name_from, const String & file_name_to) +{ + auto file_path = path / file_name_to; + fs::create_directories(file_path.parent_path()); + std::string abs_source_path; + if (from_disk) + abs_source_path = fullPath(from_disk, file_name_from); + else + abs_source_path = fs::absolute(file_name_from); + + fs::copy(abs_source_path, file_path, fs::copy_options::recursive | fs::copy_options::overwrite_existing); +} + } diff --git a/src/Backups/BackupIO_File.h b/src/Backups/BackupIO_File.h index 5d37408e6d8..425a019c71a 100644 --- a/src/Backups/BackupIO_File.h +++ b/src/Backups/BackupIO_File.h @@ -9,12 +9,13 @@ namespace DB class BackupReaderFile : public IBackupReader { public: - BackupReaderFile(const String & path_); + explicit BackupReaderFile(const String & path_); ~BackupReaderFile() override; bool fileExists(const String & file_name) override; UInt64 getFileSize(const String & file_name) override; std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override; + DataSourceDescription getDataSourceDescription() const override; private: std::filesystem::path path; @@ -23,7 +24,7 @@ private: class BackupWriterFile : public IBackupWriter { public: - BackupWriterFile(const String & path_); + explicit BackupWriterFile(const String & path_); ~BackupWriterFile() override; bool fileExists(const String & file_name) override; @@ -31,6 +32,10 @@ public: bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override; std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override; void removeFiles(const Strings & file_names) override; + DataSourceDescription getDataSourceDescription() const override; + bool supportNativeCopy(DataSourceDescription data_source_description) const override; + + void copyFileNative(DiskPtr from_disk, const String & file_name_from, const String & file_name_to) override; private: std::filesystem::path path; diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp index 263aab2bd50..f6442545f48 100644 --- a/src/Backups/BackupImpl.cpp +++ b/src/Backups/BackupImpl.cpp @@ -111,6 +111,22 @@ public: UInt64 getSize() const override { return size; } std::optional<UInt128> getChecksum() const override { return checksum; } + 
String getFilePath() const override + { + return data_file_name; + } + + DiskPtr tryGetDiskIfExists() const override + { + return nullptr; + } + + DataSourceDescription getDataSourceDescription() const override + { + return backup->reader->getDataSourceDescription(); + } + + private: const std::shared_ptr<const BackupImpl> backup; const String archive_suffix; @@ -587,9 +603,86 @@ BackupEntryPtr BackupImpl::readFile(const SizeAndChecksum & size_and_checksum) c } } +namespace +{ + +std::optional<SizeAndChecksum> getInfoAboutFileFromBaseBackupIfExists(std::shared_ptr<const IBackup> base_backup, const std::string & file_path) +{ + if (base_backup && base_backup->fileExists(file_path)) + return std::pair{base_backup->getFileSize(file_path), base_backup->getFileChecksum(file_path)}; + + return std::nullopt; +} + +enum class CheckBackupResult +{ + HasPrefix, + HasFull, + HasNothing, +}; + +CheckBackupResult checkBaseBackupForFile(const SizeAndChecksum & base_backup_info, const FileInfo & new_entry_info) +{ + /// We cannot reuse base backup because our file is smaller + /// than file stored in previous backup + if (new_entry_info.size < base_backup_info.first) + return CheckBackupResult::HasNothing; + + if (base_backup_info.first == new_entry_info.size) + return CheckBackupResult::HasFull; + + return CheckBackupResult::HasPrefix; + +} + +struct ChecksumsForNewEntry +{ + UInt128 full_checksum; + UInt128 prefix_checksum; +}; + +/// Calculate the checksum for a backup entry if it is not already set. +/// Also able to calculate an additional checksum of some prefix. +ChecksumsForNewEntry calculateNewEntryChecksumsIfNeeded(BackupEntryPtr entry, size_t prefix_size) +{ + if (prefix_size > 0) + { + auto read_buffer = entry->getReadBuffer(); + HashingReadBuffer hashing_read_buffer(*read_buffer); + hashing_read_buffer.ignore(prefix_size); + auto prefix_checksum = hashing_read_buffer.getHash(); + if (entry->getChecksum() == std::nullopt) + { + hashing_read_buffer.ignoreAll(); + auto full_checksum = hashing_read_buffer.getHash(); + return ChecksumsForNewEntry{full_checksum, prefix_checksum}; + } + else + { + return ChecksumsForNewEntry{*(entry->getChecksum()), prefix_checksum}; + } + } + else + { + if (entry->getChecksum() == std::nullopt) + { + auto read_buffer = entry->getReadBuffer(); + HashingReadBuffer hashing_read_buffer(*read_buffer); + hashing_read_buffer.ignoreAll(); + return ChecksumsForNewEntry{hashing_read_buffer.getHash(), 0}; + } + else + { + return ChecksumsForNewEntry{*(entry->getChecksum()), 0}; + } + } +} + +} void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry) { + std::lock_guard lock{mutex}; if (open_mode != OpenMode::WRITE) throw Exception("Backup is not opened for writing", ErrorCodes::LOGICAL_ERROR); @@ -597,164 +690,179 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry) if (writing_finalized) throw Exception("Backup is already finalized", ErrorCodes::LOGICAL_ERROR); + std::string from_file_name = "memory buffer"; + if (auto fname = entry->getFilePath(); !fname.empty()) + from_file_name = "file " + fname; + LOG_TRACE(log, "Writing backup for file {} from file {}", file_name, from_file_name); + auto adjusted_path = removeLeadingSlash(file_name); if (coordination->getFileInfo(adjusted_path)) throw Exception( ErrorCodes::BACKUP_ENTRY_ALREADY_EXISTS, "Backup {}: Entry {} already exists", backup_name, quoteString(file_name)); - FileInfo info; - info.file_name = adjusted_path; - size_t size = entry->getSize(); - info.size = size; + FileInfo info + { + .file_name = adjusted_path, + .size = entry->getSize(), 
.base_size = 0, + .base_checksum = 0, + }; - /// Check if the entry's data is empty. - if (!info.size) + /// Empty file, nothing to backup + if (info.size == 0) { coordination->addFileInfo(info); return; } - /// Maybe we have a copy of this file in the backup already. - std::optional<UInt128> checksum = entry->getChecksum(); - if (checksum && coordination->getFileInfo(std::pair{size, *checksum})) - { - info.checksum = *checksum; - coordination->addFileInfo(info); - return; - } + std::optional<SizeAndChecksum> base_backup_file_info = getInfoAboutFileFromBaseBackupIfExists(base_backup, adjusted_path); - /// Check if a entry with such name exists in the base backup. - bool base_exists = (base_backup && base_backup->fileExists(adjusted_path)); - UInt64 base_size = 0; - UInt128 base_checksum{0, 0}; - if (base_exists) + /// We have info about this file in the base backup. + /// If the file has no checksum, calculate and fill it. + if (base_backup_file_info.has_value()) { - base_size = base_backup->getFileSize(adjusted_path); - base_checksum = base_backup->getFileChecksum(adjusted_path); - } + LOG_TRACE(log, "File {} found in base backup, checking for equality", adjusted_path); + CheckBackupResult check_base = checkBaseBackupForFile(*base_backup_file_info, info); - std::unique_ptr<SeekableReadBuffer> read_buffer; /// We'll set that later. - std::optional<HashingReadBuffer> hashing_read_buffer; - UInt64 hashing_pos = 0; /// Current position in `hashing_read_buffer`. - - /// Determine whether it's possible to receive this entry's data from the base backup completely or partly. - bool use_base = false; - if (base_exists && base_size && (size >= base_size)) - { - if (checksum && (size == base_size)) + /// A file with the same name but a smaller size exists in the previous backup + if (check_base == CheckBackupResult::HasPrefix) { - /// The size is the same, we need to compare checksums to find out - /// if the entry's data has not changed since the base backup. - use_base = (*checksum == base_checksum); + auto checksums = calculateNewEntryChecksumsIfNeeded(entry, base_backup_file_info->first); + info.checksum = checksums.full_checksum; + + /// We have a prefix of this file in the base backup with the same checksum. + /// In ClickHouse this can happen for StorageLog, for example. + if (checksums.prefix_checksum == base_backup_file_info->second) + { + LOG_TRACE(log, "File prefix of {} in base backup, will write the rest of the file to current backup", adjusted_path); + info.base_size = base_backup_file_info->first; + info.base_checksum = base_backup_file_info->second; + } + else + { + LOG_TRACE(log, "Prefix checksum of file {} doesn't match with checksum in base backup", adjusted_path); + } } else { - /// The size has increased, we need to calculate a partial checksum to find out - /// if the entry's data has only appended since the base backup.
- read_buffer = entry->getReadBuffer(); - hashing_read_buffer.emplace(*read_buffer); - hashing_read_buffer->ignore(base_size); - hashing_pos = base_size; - UInt128 partial_checksum = hashing_read_buffer->getHash(); - if (size == base_size) - checksum = partial_checksum; - if (partial_checksum == base_checksum) - use_base = true; + /// We have the full file or nothing; first of all, let's get the checksum + /// of the current file + auto checksums = calculateNewEntryChecksumsIfNeeded(entry, 0); + info.checksum = checksums.full_checksum; + + if (info.checksum == base_backup_file_info->second) + { + LOG_TRACE(log, "Found whole file {} in base backup", adjusted_path); + assert(check_base == CheckBackupResult::HasFull); + assert(info.size == base_backup_file_info->first); + + info.base_size = base_backup_file_info->first; + info.base_checksum = base_backup_file_info->second; + /// Actually we could add this info to the coordination and exit, + /// but we intentionally don't do it; otherwise the control flow + /// of this function would become very complex. + } + else + { + LOG_TRACE(log, "Whole file {} in base backup doesn't match by checksum", adjusted_path); + } } } - - /// Finish calculating the checksum. - if (!checksum) + else /// We don't have info about this file_name (sic!) in the base backup; + /// however, the file could be renamed, so we will check one more time using its size and checksum { - if (!read_buffer) - read_buffer = entry->getReadBuffer(); - if (!hashing_read_buffer) - hashing_read_buffer.emplace(*read_buffer); - hashing_read_buffer->ignore(size - hashing_pos); - checksum = hashing_read_buffer->getHash(); + + LOG_TRACE(log, "Nothing found for file {} in base backup", adjusted_path); + auto checksums = calculateNewEntryChecksumsIfNeeded(entry, 0); + info.checksum = checksums.full_checksum; } - hashing_read_buffer.reset(); - info.checksum = *checksum; /// Maybe we have a copy of this file in the backup already. - if (coordination->getFileInfo(std::pair{size, *checksum})) + if (coordination->getFileInfo(std::pair{info.size, info.checksum})) { + LOG_TRACE(log, "File {} already exists in current backup, adding reference", adjusted_path); coordination->addFileInfo(info); return; } - /// Check if a entry with the same checksum exists in the base backup. - if (base_backup && !use_base && base_backup->fileExists(std::pair{size, *checksum})) + /// On the previous lines we checked whether a backup for the file with adjusted_name exists in the previous backup. + /// However, the file could have been renamed while keeping the same size and checksum, so let's check for this case. + if (base_backup && base_backup->fileExists(std::pair{info.size, info.checksum})) { - /// The entry's data has not changed since the base backup, - /// but the entry itself has been moved or renamed. - base_size = size; - base_checksum = *checksum; - use_base = true; - } - if (use_base) - { - info.base_size = base_size; - info.base_checksum = base_checksum; - } + LOG_TRACE(log, "File {} doesn't exist in current backup, but we have a file with the same size and checksum", adjusted_path); + info.base_size = info.size; + info.base_checksum = info.checksum; - if (use_base && (size == base_size)) - { - /// The entry's data has not been changed since the base backup. coordination->addFileInfo(info); return; } - bool is_data_file_required; + /// All "short paths" failed. We don't have this file in the previous or the current backup, + /// or we have only a prefix of it in the previous backup. Let's take the long path.
+ info.data_file_name = info.file_name; info.archive_suffix = current_archive_suffix; + + bool is_data_file_required; coordination->addFileInfo(info, is_data_file_required); if (!is_data_file_required) - return; /// We copy data only if it's a new combination of size & checksum. - - /// Either the entry wasn't exist in the base backup - /// or the entry has data appended to the end of the data from the base backup. - /// In both those cases we have to copy data to this backup. - - /// Find out where the start position to copy data is. - auto copy_pos = use_base ? base_size : 0; - - /// Move the current read position to the start position to copy data. - if (!read_buffer) - read_buffer = entry->getReadBuffer(); - read_buffer->seek(copy_pos, SEEK_SET); - - if (!num_files_written) - checkLockFile(true); - - /// Copy the entry's data after `copy_pos`. - std::unique_ptr<WriteBuffer> out; - if (use_archives) { - String archive_suffix = current_archive_suffix; - bool next_suffix = false; - if (current_archive_suffix.empty() && is_internal_backup) - next_suffix = true; - /*if (archive_params.max_volume_size && current_archive_writer - && (current_archive_writer->getTotalSize() + size - base_size > archive_params.max_volume_size)) - next_suffix = true;*/ - if (next_suffix) - current_archive_suffix = coordination->getNextArchiveSuffix(); - if (info.archive_suffix != current_archive_suffix) - { - info.archive_suffix = current_archive_suffix; - coordination->updateFileInfo(info); - } - out = getArchiveWriter(current_archive_suffix)->writeFile(info.data_file_name); + LOG_TRACE(log, "File {} doesn't exist in current backup, but we have a file with the same size and checksum", adjusted_path); + return; /// We copy data only if it's a new combination of size & checksum. + } + auto writer_description = writer->getDataSourceDescription(); + auto reader_description = entry->getDataSourceDescription(); + + /// We need to copy the whole file without archiving; we can do it faster + /// if the source and destination are compatible + if (!use_archives && info.base_size == 0 && writer->supportNativeCopy(reader_description)) + { + + LOG_TRACE(log, "Will copy file {} using native copy", adjusted_path); + /// Should be much faster than writing data through the server + writer->copyFileNative(entry->tryGetDiskIfExists(), entry->getFilePath(), info.data_file_name); } else { - out = writer->writeFile(info.data_file_name); + LOG_TRACE(log, "Will copy file {} through memory buffers", adjusted_path); + auto read_buffer = entry->getReadBuffer(); + + /// If we have a prefix in the base backup, seek to the start of the suffix which differs + if (info.base_size != 0) + read_buffer->seek(info.base_size, SEEK_SET); + + if (!num_files_written) + checkLockFile(true); + + if (use_archives) + { + LOG_TRACE(log, "Adding file {} to archive", adjusted_path); + String archive_suffix = current_archive_suffix; + bool next_suffix = false; + if (current_archive_suffix.empty() && is_internal_backup) + next_suffix = true; + /*if (archive_params.max_volume_size && current_archive_writer + && (current_archive_writer->getTotalSize() + size - base_size > archive_params.max_volume_size)) + next_suffix = true;*/ + if (next_suffix) + current_archive_suffix = coordination->getNextArchiveSuffix(); + + if (info.archive_suffix != current_archive_suffix) + { + info.archive_suffix = current_archive_suffix; + coordination->updateFileInfo(info); + } + auto out = getArchiveWriter(current_archive_suffix)->writeFile(info.data_file_name); + copyData(*read_buffer, *out); + out->finalize(); + } + 
else + { + writer->copyFileThroughBuffer(std::move(read_buffer), info.data_file_name); + } } - copyData(*read_buffer, *out); - out->finalize(); ++num_files_written; } diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index add23411d8f..a310ee90a38 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -90,7 +90,8 @@ namespace } catch (...) { - coordination->setError(current_host, Exception{getCurrentExceptionCode(), getCurrentExceptionMessage(true, true)}); + if (coordination) + coordination->setError(current_host, Exception{getCurrentExceptionCode(), getCurrentExceptionMessage(true, true)}); } } @@ -164,9 +165,9 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context backup_coordination = makeBackupCoordination(backup_settings.coordination_zk_path, context, backup_settings.internal); } + auto backup_info = BackupInfo::fromAST(*backup_query->backup_name); try { - auto backup_info = BackupInfo::fromAST(*backup_query->backup_name); addInfo(backup_id, backup_info.toString(), backup_settings.internal, BackupStatus::CREATING_BACKUP); /// Prepare context to use. @@ -213,6 +214,7 @@ } catch (...) { + tryLogCurrentException(log, fmt::format("Failed to start {} {}", (backup_settings.internal ? "internal backup" : "backup"), backup_info.toString())); /// Something bad happened, the backup has not built. setStatusSafe(backup_id, BackupStatus::BACKUP_FAILED); sendCurrentExceptionToCoordination(backup_coordination, backup_settings.host_id); diff --git a/src/Backups/IBackupEntriesLazyBatch.cpp b/src/Backups/IBackupEntriesLazyBatch.cpp index 5fb3a4cb3c0..78086015e7b 100644 --- a/src/Backups/IBackupEntriesLazyBatch.cpp +++ b/src/Backups/IBackupEntriesLazyBatch.cpp @@ -20,6 +20,20 @@ public: UInt64 getSize() const override { return getInternalBackupEntry()->getSize(); } std::optional<UInt128> getChecksum() const override { return getInternalBackupEntry()->getChecksum(); } std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override { return getInternalBackupEntry()->getReadBuffer(); } + String getFilePath() const override + { + return getInternalBackupEntry()->getFilePath(); + } + + DiskPtr tryGetDiskIfExists() const override + { + return getInternalBackupEntry()->tryGetDiskIfExists(); + } + + DataSourceDescription getDataSourceDescription() const override + { + return getInternalBackupEntry()->getDataSourceDescription(); + } private: BackupEntryPtr getInternalBackupEntry() const diff --git a/src/Backups/IBackupEntry.h b/src/Backups/IBackupEntry.h index 04f7dc61475..2a71a1e9756 100644 --- a/src/Backups/IBackupEntry.h +++ b/src/Backups/IBackupEntry.h @@ -4,6 +4,8 @@ #include #include #include +#include +#include namespace DB { @@ -24,6 +26,12 @@ public: /// Returns a read buffer for reading the data.
virtual std::unique_ptr<SeekableReadBuffer> getReadBuffer() const = 0; + + virtual String getFilePath() const = 0; + + virtual DiskPtr tryGetDiskIfExists() const = 0; + + virtual DataSourceDescription getDataSourceDescription() const = 0; }; using BackupEntryPtr = std::shared_ptr<const IBackupEntry>; diff --git a/src/Backups/registerBackupEnginesFileAndDisk.cpp b/src/Backups/registerBackupEnginesFileAndDisk.cpp index 380ae36a8e3..69af37fc6ff 100644 --- a/src/Backups/registerBackupEnginesFileAndDisk.cpp +++ b/src/Backups/registerBackupEnginesFileAndDisk.cpp @@ -46,7 +46,7 @@ namespace void checkPath(const String & disk_name, const DiskPtr & disk, fs::path & path) { path = path.lexically_normal(); - if (!path.is_relative() && (disk->getType() == DiskType::Local)) + if (!path.is_relative() && (disk->getDataSourceDescription().type == DataSourceType::Local)) path = path.lexically_proximate(disk->getPath()); bool path_ok = path.empty() || (path.is_relative() && (*path.begin() != "..")); diff --git a/src/Bridge/IBridge.cpp b/src/Bridge/IBridge.cpp index d30e0a4c853..824ab23a882 100644 --- a/src/Bridge/IBridge.cpp +++ b/src/Bridge/IBridge.cpp @@ -179,7 +179,7 @@ void IBridge::initialize(Application & self) limit.rlim_max = limit.rlim_cur = gb; if (setrlimit(RLIMIT_RSS, &limit)) LOG_WARNING(log, "Unable to set maximum RSS to 1GB: {} (current rlim_cur={}, rlim_max={})", - errnoToString(errno), limit.rlim_cur, limit.rlim_max); + errnoToString(), limit.rlim_cur, limit.rlim_max); if (!getrlimit(RLIMIT_RSS, &limit)) LOG_INFO(log, "RSS limit: cur={}, max={}", limit.rlim_cur, limit.rlim_max); diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index c399f01c565..348ea4ece11 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1952,7 +1952,7 @@ void ClientBase::runInteractive() if (home_path.empty()) { - const char * home_path_cstr = getenv("HOME"); + const char * home_path_cstr = getenv("HOME"); // NOLINT(concurrency-mt-unsafe) if (home_path_cstr) home_path = home_path_cstr; } @@ -1962,7 +1962,7 @@ void ClientBase::runInteractive() history_file = config().getString("history_file"); else { - auto * history_file_from_env = getenv("CLICKHOUSE_HISTORY_FILE"); + auto * history_file_from_env = getenv("CLICKHOUSE_HISTORY_FILE"); // NOLINT(concurrency-mt-unsafe) if (history_file_from_env) history_file = history_file_from_env; else if (!home_path.empty()) @@ -2260,13 +2260,13 @@ void ClientBase::init(int argc, char ** argv) if (options.count("version") || options.count("V")) { showClientVersion(); - exit(0); + exit(0); // NOLINT(concurrency-mt-unsafe) } if (options.count("version-clean")) { std::cout << VERSION_STRING; - exit(0); + exit(0); // NOLINT(concurrency-mt-unsafe) } /// Output of help message. @@ -2274,7 +2274,7 @@ || (options.count("host") && options["host"].as<std::string>() == "elp")) /// If user writes -help instead of --help. { printHelpMessage(options_description); - exit(0); + exit(0); // NOLINT(concurrency-mt-unsafe) } /// Common options for clickhouse-client and clickhouse-local.
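The new IBackupEntry methods above — getFilePath(), tryGetDiskIfExists() and getDataSourceDescription() — exist so that a backup destination can decide whether a fast native copy is possible instead of streaming bytes through the server. Below is a minimal sketch of that dispatch, using only the interfaces visible in this patch; the free function itself is hypothetical and simplified (the real BackupImpl::writeFile() additionally rules out archives and incremental base-backup prefixes):

/// Hypothetical illustration, not part of the patch.
void copyEntryToBackup(IBackupWriter & writer, const BackupEntryPtr & entry, const String & dest_name)
{
    /// Native copy is possible only when the writer recognizes the entry's
    /// data source, e.g. both sides are the same kind of local disk.
    if (writer.supportNativeCopy(entry->getDataSourceDescription()))
    {
        /// Fast path: the destination copies the file directly (disk-to-disk).
        writer.copyFileNative(entry->tryGetDiskIfExists(), entry->getFilePath(), dest_name);
    }
    else
    {
        /// Generic path: stream the data through a read buffer.
        writer.copyFileThroughBuffer(entry->getReadBuffer(), dest_name);
    }
}

The DataSourceDescription comparison is what makes BackupWriterDisk and BackupWriterFile interchangeable here: each writer answers supportNativeCopy() by comparing the entry's description against its own.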
diff --git a/src/Columns/ColumnConst.h b/src/Columns/ColumnConst.h index 21cfaf7f64c..99a230720a4 100644 --- a/src/Columns/ColumnConst.h +++ b/src/Columns/ColumnConst.h @@ -269,7 +269,7 @@ public: bool isFixedAndContiguous() const override { return data->isFixedAndContiguous(); } bool valuesHaveFixedSize() const override { return data->valuesHaveFixedSize(); } size_t sizeOfValueIfFixed() const override { return data->sizeOfValueIfFixed(); } - StringRef getRawData() const override { return data->getRawData(); } + std::string_view getRawData() const override { return data->getRawData(); } /// Not part of the common interface. diff --git a/src/Columns/ColumnDecimal.h b/src/Columns/ColumnDecimal.h index 03875121637..afae2cd641a 100644 --- a/src/Columns/ColumnDecimal.h +++ b/src/Columns/ColumnDecimal.h @@ -71,9 +71,9 @@ public: data.resize_assume_reserved(data.size() - n); } - StringRef getRawData() const override + std::string_view getRawData() const override { - return StringRef(reinterpret_cast<const char *>(data.data()), byteSize()); + return {reinterpret_cast<const char *>(data.data()), byteSize()}; } StringRef getDataAt(size_t n) const override diff --git a/src/Columns/ColumnFixedString.h b/src/Columns/ColumnFixedString.h index 711db056ee6..7c2d9b1a155 100644 --- a/src/Columns/ColumnFixedString.h +++ b/src/Columns/ColumnFixedString.h @@ -209,7 +209,7 @@ public: bool isFixedAndContiguous() const override { return true; } size_t sizeOfValueIfFixed() const override { return n; } - StringRef getRawData() const override { return StringRef(chars.data(), chars.size()); } + std::string_view getRawData() const override { return {reinterpret_cast<const char *>(chars.data()), chars.size()}; } /// Specialized part of interface, not from IColumn. void insertString(const String & string) { insertData(string.c_str(), string.size()); } diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index 6ba9abaca32..88e953891cc 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -332,9 +332,9 @@ public: bool isFixedAndContiguous() const override { return true; } size_t sizeOfValueIfFixed() const override { return sizeof(T); } - StringRef getRawData() const override + std::string_view getRawData() const override { - return StringRef(reinterpret_cast<const char *>(data.data()), byteSize()); + return {reinterpret_cast<const char *>(data.data()), byteSize()}; } StringRef getDataAt(size_t n) const override diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index 8b693015674..974925d247e 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -507,7 +507,7 @@ public: [[nodiscard]] virtual bool isFixedAndContiguous() const { return false; } /// If isFixedAndContiguous, returns the underlying data array, otherwise throws an exception. - [[nodiscard]] virtual StringRef getRawData() const { throw Exception("Column " + getName() + " is not a contiguous block of memory", ErrorCodes::NOT_IMPLEMENTED); } + [[nodiscard]] virtual std::string_view getRawData() const { throw Exception("Column " + getName() + " is not a contiguous block of memory", ErrorCodes::NOT_IMPLEMENTED); } /// If valuesHaveFixedSize, returns size of value, otherwise throw an exception.
[[nodiscard]] virtual size_t sizeOfValueIfFixed() const { throw Exception("Values of column " + getName() + " are not fixed size.", ErrorCodes::CANNOT_GET_SIZE_OF_FIELD); } diff --git a/src/Common/ColumnsHashing.h b/src/Common/ColumnsHashing.h index 0e4617345fa..54446950567 100644 --- a/src/Common/ColumnsHashing.h +++ b/src/Common/ColumnsHashing.h @@ -41,12 +41,12 @@ struct HashMethodOneNumber /// If the keys of a fixed length then key_sizes contains their lengths, empty otherwise. HashMethodOneNumber(const ColumnRawPtrs & key_columns, const Sizes & /*key_sizes*/, const HashMethodContextPtr &) { - vec = key_columns[0]->getRawData().data; + vec = key_columns[0]->getRawData().data(); } explicit HashMethodOneNumber(const IColumn * column) { - vec = column->getRawData().data; + vec = column->getRawData().data(); } /// Creates context. Method is called once and result context is used in all threads. @@ -577,7 +577,7 @@ struct HashMethodKeysFixed columns_data.reset(new const char*[keys_size]); for (size_t i = 0; i < keys_size; ++i) - columns_data[i] = Base::getActualColumns()[i]->getRawData().data; + columns_data[i] = Base::getActualColumns()[i]->getRawData().data(); } #endif } diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp index c0b7b176b33..8d7c26e967f 100644 --- a/src/Common/Config/ConfigProcessor.cpp +++ b/src/Common/Config/ConfigProcessor.cpp @@ -419,7 +419,7 @@ void ConfigProcessor::doIncludesRecursive( XMLDocumentPtr env_document; auto get_env_node = [&](const std::string & name) -> const Node * { - const char * env_val = std::getenv(name.c_str()); + const char * env_val = std::getenv(name.c_str()); // NOLINT(concurrency-mt-unsafe) // this is safe on Linux glibc/Musl, but potentially not safe on other platforms if (env_val == nullptr) return nullptr; diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index c46342f6ade..eeb8b4e2832 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -29,6 +29,10 @@ M(PostgreSQLConnection, "Number of client connections using PostgreSQL protocol") \ M(OpenFileForRead, "Number of files open for reading") \ M(OpenFileForWrite, "Number of files open for writing") \ + M(TotalTemporaryFiles, "Number of temporary files created") \ + M(TemporaryFilesForSort, "Number of temporary files created for external sorting") \ + M(TemporaryFilesForAggregation, "Number of temporary files created for external aggregation") \ + M(TemporaryFilesForJoin, "Number of temporary files created for JOIN") \ M(Read, "Number of read (read, pread, io_getevents, etc.) syscalls in fly") \ M(Write, "Number of write (write, pwrite, io_getevents, etc.) syscalls in fly") \ M(NetworkReceive, "Number of threads receiving data from network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \ diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index 10797b7a809..cf61d2795f0 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -86,13 +86,10 @@ static void splitHostAndPort(const std::string & host_and_port, std::string & ou static DNSResolver::IPAddresses hostByName(const std::string & host) { - /// Family: AF_UNSPEC - /// AI_ALL is required for checking if client is allowed to connect from an address - auto flags = Poco::Net::DNS::DNS_HINT_AI_V4MAPPED | Poco::Net::DNS::DNS_HINT_AI_ALL; /// Do not resolve IPv6 (or IPv4) if no local IPv6 (or IPv4) addresses are configured. 
/// It should not affect client address checking, since client cannot connect from IPv6 address /// if server has no IPv6 addresses. - flags |= Poco::Net::DNS::DNS_HINT_AI_ADDRCONFIG; + auto flags = Poco::Net::DNS::DNS_HINT_AI_ADDRCONFIG; DNSResolver::IPAddresses addresses; diff --git a/src/Common/DateLUT.cpp b/src/Common/DateLUT.cpp index 1206015b764..ae6127670e5 100644 --- a/src/Common/DateLUT.cpp +++ b/src/Common/DateLUT.cpp @@ -30,12 +30,12 @@ std::string determineDefaultTimeZone() { namespace fs = std::filesystem; - const char * tzdir_env_var = std::getenv("TZDIR"); + const char * tzdir_env_var = std::getenv("TZDIR"); // NOLINT(concurrency-mt-unsafe) // ok, because it does not run concurrently with other getenv calls fs::path tz_database_path = tzdir_env_var ? tzdir_env_var : "/usr/share/zoneinfo/"; fs::path tz_file_path; std::string error_prefix; - const char * tz_env_var = std::getenv("TZ"); + const char * tz_env_var = std::getenv("TZ"); // NOLINT(concurrency-mt-unsafe) // ok, because it does not run concurrently with other getenv calls /// In recent tzdata packages some files now are symlinks and canonical path resolution /// may give wrong timezone names - store the name as it is, if possible. diff --git a/src/Common/Documentation.h b/src/Common/Documentation.h new file mode 100644 index 00000000000..0b0eacbeccd --- /dev/null +++ b/src/Common/Documentation.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include +#include + + +namespace DB +{ + +/** Embedded reference documentation for high-level server components, + * such as SQL functions, table functions, data types, table engines, etc. + * + * The advantages of embedded documentation are: + * - it is easy to write and update with code; + * - it is easy to make sure that the documentation exists; + * - the documentation can be introspected by the queries on running server; + * - the documentation can be extracted by external tools such as SQL editors + * in machine-readable form and presented in human readable form; + * - the documentation can be generated in various formats; + * - it is easy to generate a documentation with information about the version when the feature appeared or changed; + * - it is easy to point to the source code from the documentation; + * - it is easy to point to the tests that covered every component, and order the tests by relevance; + * - it is easy to point to the authors of every feature; + * - the examples from the documentation can be automatically validated to be always correct and serve the role of tests; + * - no cross-team communication impedance; + * + * The disadvantages of embedded documentation are: + * - it is only suitable for uniform components of the system and not suitable for tutorials and overviews; + * - it is more difficult to edit by random readers; + * + * The documentation can contain: + * - description (the main text); + * - examples (queries that can be referenced from the text by names); + * - categories - one or a few text strings like {"Mathematical", "Array Processing"}; + * + * Only the description is mandatory. + * + * The description should be represented in Markdown (or just plaintext). + * Some extensions for Markdown are added: + * - [example:name] will reference to an example with the corresponding name. + * + * Documentation does not support multiple languages. + * The only available language is English. 
+struct Documentation +{ + using Description = std::string; + using ExampleName = std::string; + using ExampleQuery = std::string; + using Examples = std::map<ExampleName, ExampleQuery>; + using Category = std::string; + using Categories = std::vector<Category>; + + Description description; + Examples examples; + Categories categories; + + Documentation(Description description_) : description(std::move(description_)) {} + Documentation(Description description_, Examples examples_) : description(std::move(description_)), examples(std::move(examples_)) {} + Documentation(Description description_, Examples examples_, Categories categories_) + : description(std::move(description_)), examples(std::move(examples_)), categories(std::move(categories_)) {} + + /// TODO: Please remove this constructor. Documentation should always be non-empty. + Documentation() {} +}; + +} diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index f8d8deab08b..f65711a8521 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -635,6 +635,7 @@ M(664, ACCESS_STORAGE_DOESNT_ALLOW_BACKUP) \ M(665, CANNOT_CONNECT_NATS) \ M(666, CANNOT_USE_CACHE) \ + M(667, NOT_INITIALIZED) \ \ M(999, KEEPER_EXCEPTION) \ M(1000, POCO_EXCEPTION) \ diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index 1f5ff35ab4e..3645ac5594f 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -152,12 +152,12 @@ Exception::FramePointers Exception::getStackFramePointers() const void throwFromErrno(const std::string & s, int code, int the_errno) { - throw ErrnoException(s + ", " + errnoToString(code, the_errno), code, the_errno); + throw ErrnoException(s + ", " + errnoToString(the_errno), code, the_errno); } void throwFromErrnoWithPath(const std::string & s, const std::string & path, int code, int the_errno) { - throw ErrnoException(s + ", " + errnoToString(code, the_errno), code, the_errno, path); + throw ErrnoException(s + ", " + errnoToString(the_errno), code, the_errno, path); } static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message) diff --git a/src/Common/FileCache.cpp b/src/Common/FileCache.cpp index 0f2c4559177..0ac047e0818 100644 --- a/src/Common/FileCache.cpp +++ b/src/Common/FileCache.cpp @@ -30,12 +30,12 @@ FileCache::FileCache( , max_element_size(cache_settings_.max_elements) , max_file_segment_size(cache_settings_.max_file_segment_size) , allow_persistent_files(cache_settings_.do_not_evict_index_and_mark_files) + , enable_cache_hits_threshold(cache_settings_.enable_cache_hits_threshold) , enable_filesystem_query_cache_limit(cache_settings_.enable_filesystem_query_cache_limit) + , log(&Poco::Logger::get("FileCache")) , main_priority(std::make_unique<LRUFileCachePriority>()) , stash_priority(std::make_unique<LRUFileCachePriority>()) , max_stash_element_size(cache_settings_.max_elements) - , enable_cache_hits_threshold(cache_settings_.enable_cache_hits_threshold) - , log(&Poco::Logger::get("FileCache")) { } @@ -77,132 +77,6 @@ void FileCache::assertInitialized() const throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Cache not initialized"); } -FileCache::QueryContextPtr FileCache::getCurrentQueryContext(std::lock_guard<std::mutex> & cache_lock) -{ - if (!isQueryInitialized()) - return nullptr; - - return getQueryContext(std::string(CurrentThread::getQueryId()), cache_lock); -} - -FileCache::QueryContextPtr FileCache::getQueryContext(const String & query_id, std::lock_guard<std::mutex> & /* cache_lock */) -{ - auto query_iter = query_map.find(query_id); - return (query_iter == query_map.end()) ?
nullptr : query_iter->second; -} - -void FileCache::removeQueryContext(const String & query_id) -{ - std::lock_guard cache_lock(mutex); - auto query_iter = query_map.find(query_id); - - if (query_iter == query_map.end()) - { - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Attempt to release query context that does not exist (query_id: {})", - query_id); - } - - query_map.erase(query_iter); -} - -FileCache::QueryContextPtr FileCache::getOrSetQueryContext( - const String & query_id, const ReadSettings & settings, std::lock_guard & cache_lock) -{ - if (query_id.empty()) - return nullptr; - - auto context = getQueryContext(query_id, cache_lock); - if (context) - return context; - - auto query_context = std::make_shared(settings.max_query_cache_size, settings.skip_download_if_exceeds_query_cache); - auto query_iter = query_map.emplace(query_id, query_context).first; - return query_iter->second; -} - -FileCache::QueryContextHolder FileCache::getQueryContextHolder(const String & query_id, const ReadSettings & settings) -{ - std::lock_guard cache_lock(mutex); - - if (!enable_filesystem_query_cache_limit || settings.max_query_cache_size == 0) - return {}; - - /// if enable_filesystem_query_cache_limit is true, and max_query_cache_size large than zero, - /// we create context query for current query. - auto context = getOrSetQueryContext(query_id, settings, cache_lock); - return QueryContextHolder(query_id, this, context); -} - -void FileCache::QueryContext::remove(const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock) -{ - if (cache_size < size) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Deleted cache size exceeds existing cache size"); - - if (!skip_download_if_exceeds_query_cache) - { - auto record = records.find({key, offset}); - if (record != records.end()) - { - record->second->removeAndGetNext(cache_lock); - records.erase({key, offset}); - } - } - cache_size -= size; -} - -void FileCache::QueryContext::reserve(const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock) -{ - if (cache_size + size > max_cache_size) - { - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Reserved cache size exceeds the remaining cache size (key: {}, offset: {})", - key.toString(), offset); - } - - if (!skip_download_if_exceeds_query_cache) - { - auto record = records.find({key, offset}); - if (record == records.end()) - { - auto queue_iter = priority->add(key, offset, 0, cache_lock); - record = records.insert({{key, offset}, queue_iter}).first; - } - record->second->incrementSize(size, cache_lock); - } - cache_size += size; -} - -void FileCache::QueryContext::use(const Key & key, size_t offset, std::lock_guard & cache_lock) -{ - if (skip_download_if_exceeds_query_cache) - return; - - auto record = records.find({key, offset}); - if (record != records.end()) - record->second->use(cache_lock); -} - -FileCache::QueryContextHolder::QueryContextHolder( - const String & query_id_, - FileCache * cache_, - FileCache::QueryContextPtr context_) - : query_id(query_id_) - , cache(cache_) - , context(context_) -{ -} - -FileCache::QueryContextHolder::~QueryContextHolder() -{ - /// If only the query_map and the current holder hold the context_query, - /// the query has been completed and the query_context is released. 
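The reference-counting trick described in the comment above, in isolation (a hedged sketch with hypothetical names; the registry plays the role of query_map, and real code must lock around registry access, as removeQueryContext does):

#include <map>
#include <memory>
#include <string>

struct Context { /* per-query state */ };

std::map<std::string, std::shared_ptr<Context>> registry;  // role of query_map

struct Holder
{
    std::string id;
    std::shared_ptr<Context> context;

    ~Holder()
    {
        // Exactly two owners left (registry + this holder) means nobody else
        // uses the context, so the registry entry can be dropped as well.
        if (context && context.use_count() == 2)
            registry.erase(id);
    }
};

int main()
{
    {
        Holder holder{"query-1", std::make_shared<Context>()};
        registry.emplace("query-1", holder.context);   // use_count() == 2
    }   // ~Holder fires, sees use_count() == 2, erases the registry entry
    return static_cast<int>(registry.count("query-1"));   // 0
}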
- if (context && context.use_count() == 2) - cache->removeQueryContext(query_id); -} - void FileCache::initialize() { std::lock_guard cache_lock(mutex); @@ -234,9 +108,10 @@ void FileCache::useCell( if (file_segment->isDownloaded() && fs::file_size(getPathInLocalCache(file_segment->key(), file_segment->offset(), file_segment->isPersistent())) == 0) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Cannot have zero size downloaded file segments. Current file segment: {}", - file_segment->range().toString()); + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Cannot have zero size downloaded file segments. {}", + file_segment->getInfoForLog()); result.push_back(cell.file_segment); @@ -998,7 +873,7 @@ void FileCache::remove( Key key, size_t offset, std::lock_guard & cache_lock, std::lock_guard & /* segment_lock */) { - LOG_TEST(log, "Remove. Key: {}, offset: {}", key.toString(), offset); + LOG_DEBUG(log, "Remove from cache. Key: {}, offset: {}", key.toString(), offset); auto * cell = getCell(key, offset, cache_lock); if (!cell) @@ -1222,12 +1097,6 @@ size_t FileCache::getUsedCacheSizeUnlocked(std::lock_guard & cache_l return main_priority->getCacheSize(cache_lock); } -size_t FileCache::getAvailableCacheSize() const -{ - std::lock_guard cache_lock(mutex); - return getAvailableCacheSizeUnlocked(cache_lock); -} - size_t FileCache::getAvailableCacheSizeUnlocked(std::lock_guard & cache_lock) const { return max_size - getUsedCacheSizeUnlocked(cache_lock); @@ -1346,4 +1215,130 @@ void FileCache::assertPriorityCorrectness(std::lock_guard & cache_lo assert(main_priority->getElementsNum(cache_lock) <= max_element_size); } +FileCache::QueryContextPtr FileCache::getCurrentQueryContext(std::lock_guard & cache_lock) +{ + if (!isQueryInitialized()) + return nullptr; + + return getQueryContext(std::string(CurrentThread::getQueryId()), cache_lock); +} + +FileCache::QueryContextPtr FileCache::getQueryContext(const String & query_id, std::lock_guard & /* cache_lock */) +{ + auto query_iter = query_map.find(query_id); + return (query_iter == query_map.end()) ? nullptr : query_iter->second; +} + +void FileCache::removeQueryContext(const String & query_id) +{ + std::lock_guard cache_lock(mutex); + auto query_iter = query_map.find(query_id); + + if (query_iter == query_map.end()) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Attempt to release query context that does not exist (query_id: {})", + query_id); + } + + query_map.erase(query_iter); +} + +FileCache::QueryContextPtr FileCache::getOrSetQueryContext( + const String & query_id, const ReadSettings & settings, std::lock_guard & cache_lock) +{ + if (query_id.empty()) + return nullptr; + + auto context = getQueryContext(query_id, cache_lock); + if (context) + return context; + + auto query_context = std::make_shared(settings.max_query_cache_size, settings.skip_download_if_exceeds_query_cache); + auto query_iter = query_map.emplace(query_id, query_context).first; + return query_iter->second; +} + +FileCache::QueryContextHolder FileCache::getQueryContextHolder(const String & query_id, const ReadSettings & settings) +{ + std::lock_guard cache_lock(mutex); + + if (!enable_filesystem_query_cache_limit || settings.max_query_cache_size == 0) + return {}; + + /// if enable_filesystem_query_cache_limit is true, and max_query_cache_size large than zero, + /// we create context query for current query. 
+ auto context = getOrSetQueryContext(query_id, settings, cache_lock); + return QueryContextHolder(query_id, this, context); +} + +void FileCache::QueryContext::remove(const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock) +{ + if (cache_size < size) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Deleted cache size exceeds existing cache size"); + + if (!skip_download_if_exceeds_query_cache) + { + auto record = records.find({key, offset}); + if (record != records.end()) + { + record->second->removeAndGetNext(cache_lock); + records.erase({key, offset}); + } + } + cache_size -= size; +} + +void FileCache::QueryContext::reserve(const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock) +{ + if (cache_size + size > max_cache_size) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Reserved cache size exceeds the remaining cache size (key: {}, offset: {})", + key.toString(), offset); + } + + if (!skip_download_if_exceeds_query_cache) + { + auto record = records.find({key, offset}); + if (record == records.end()) + { + auto queue_iter = priority->add(key, offset, 0, cache_lock); + record = records.insert({{key, offset}, queue_iter}).first; + } + record->second->incrementSize(size, cache_lock); + } + cache_size += size; +} + +void FileCache::QueryContext::use(const Key & key, size_t offset, std::lock_guard & cache_lock) +{ + if (skip_download_if_exceeds_query_cache) + return; + + auto record = records.find({key, offset}); + if (record != records.end()) + record->second->use(cache_lock); +} + +FileCache::QueryContextHolder::QueryContextHolder( + const String & query_id_, + FileCache * cache_, + FileCache::QueryContextPtr context_) + : query_id(query_id_) + , cache(cache_) + , context(context_) +{ +} + +FileCache::QueryContextHolder::~QueryContextHolder() +{ + /// If only the query_map and the current holder hold the context_query, + /// the query has been completed and the query_context is released. + if (context && context.use_count() == 2) + cache->removeQueryContext(query_id); +} + } diff --git a/src/Common/FileCache.h b/src/Common/FileCache.h index 5b368329c88..1690690d102 100644 --- a/src/Common/FileCache.h +++ b/src/Common/FileCache.h @@ -23,13 +23,17 @@ namespace DB { /// Local cache for remote filesystem files, represented as a set of non-overlapping non-empty file segments. -/// Different caching algorithms are implemented based on IFileCachePriority. +/// Different caching algorithms are implemented using IFileCachePriority. class FileCache : private boost::noncopyable { - friend class FileSegment; - friend class IFileCachePriority; - friend struct FileSegmentsHolder; - friend class FileSegmentRangeWriter; + +friend class FileSegment; +friend class IFileCachePriority; +friend struct FileSegmentsHolder; +friend class FileSegmentRangeWriter; + +struct QueryContext; +using QueryContextPtr = std::shared_ptr; public: using Key = DB::FileCacheKey; @@ -41,25 +45,8 @@ public: /// Restore cache from local filesystem. void initialize(); - void removeIfExists(const Key & key); - - void removeIfReleasable(); - - static bool isReadOnly(); - - /// Cache capacity in bytes. 
- size_t capacity() const { return max_size; } - - static Key hash(const String & path); - - String getPathInLocalCache(const Key & key, size_t offset, bool is_persistent) const; - - String getPathInLocalCache(const Key & key) const; - const String & getBasePath() const { return cache_base_path; } - std::vector tryGetCachePaths(const Key & key); - /** * Given an `offset` and `size` representing [offset, offset + size) bytes interval, * return list of cached non-overlapping non-empty @@ -84,6 +71,28 @@ public: */ FileSegmentsHolder get(const Key & key, size_t offset, size_t size); + /// Remove files by `key`. Removes files which might be used at the moment. + void removeIfExists(const Key & key); + + /// Remove files by `key`. Will not remove files which are used at the moment. + void removeIfReleasable(); + + static Key hash(const String & path); + + String getPathInLocalCache(const Key & key, size_t offset, bool is_persistent) const; + + String getPathInLocalCache(const Key & key) const; + + std::vector tryGetCachePaths(const Key & key); + + size_t capacity() const { return max_size; } + + size_t getUsedCacheSize() const; + + size_t getFileSegmentsNum() const; + + static bool isReadOnly(); + /** * Create a file segment of exactly requested size with EMPTY state. * Throw exception if requested size exceeds max allowed file segment size. @@ -102,92 +111,6 @@ public: /// For debug. String dumpStructure(const Key & key); - size_t getUsedCacheSize() const; - - size_t getFileSegmentsNum() const; - -private: - String cache_base_path; - size_t max_size; - size_t max_element_size; - size_t max_file_segment_size; - bool allow_persistent_files; - - bool is_initialized = false; - - mutable std::mutex mutex; - - bool tryReserve(const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock); - - void remove(Key key, size_t offset, std::lock_guard & cache_lock, std::lock_guard & segment_lock); - - bool isLastFileSegmentHolder( - const Key & key, size_t offset, std::lock_guard & cache_lock, std::lock_guard & segment_lock); - - void reduceSizeToDownloaded( - const Key & key, size_t offset, std::lock_guard & cache_lock, std::lock_guard & /* segment_lock */); - - void assertInitialized() const; - - using AccessKeyAndOffset = std::pair; - struct KeyAndOffsetHash - { - std::size_t operator()(const AccessKeyAndOffset & key) const - { - return std::hash()(key.first.key) ^ std::hash()(key.second); - } - }; - - using FileCacheRecords = std::unordered_map; - - /// Used to track and control the cache access of each query. - /// Through it, we can realize the processing of different queries by the cache layer. 
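The accounting this struct performs can be shown in miniature; a sketch under the assumption that only the byte bookkeeping matters (the priority-queue record keeping of the real QueryContext is omitted):

#include <cstddef>
#include <stdexcept>

// Sketch of the per-query accounting idea: reserve() grows usage up to a
// budget, remove() shrinks it; exceeding the budget is a logic error, as in
// QueryContext::reserve in this diff.
struct QueryBudget
{
    size_t used = 0;
    size_t max;

    explicit QueryBudget(size_t max_) : max(max_) {}

    void reserve(size_t size)
    {
        if (used + size > max)
            throw std::logic_error("Reserved cache size exceeds the remaining cache size");
        used += size;
    }

    void remove(size_t size)
    {
        if (used < size)
            throw std::logic_error("Deleted cache size exceeds existing cache size");
        used -= size;
    }
};

int main()
{
    QueryBudget budget(100);
    budget.reserve(60);
    budget.remove(20);   // used == 40
    budget.reserve(60);  // ok: exactly at the budget
}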
- struct QueryContext - { - FileCacheRecords records; - FileCachePriorityPtr priority; - - size_t cache_size = 0; - size_t max_cache_size; - - bool skip_download_if_exceeds_query_cache; - - QueryContext(size_t max_cache_size_, bool skip_download_if_exceeds_query_cache_) - : max_cache_size(max_cache_size_), skip_download_if_exceeds_query_cache(skip_download_if_exceeds_query_cache_) - { - } - - void remove(const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock); - - void reserve(const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock); - - void use(const Key & key, size_t offset, std::lock_guard & cache_lock); - - size_t getMaxCacheSize() const { return max_cache_size; } - - size_t getCacheSize() const { return cache_size; } - - FileCachePriorityPtr getPriority() { return priority; } - - bool isSkipDownloadIfExceed() const { return skip_download_if_exceeds_query_cache; } - }; - - using QueryContextPtr = std::shared_ptr; - using QueryContextMap = std::unordered_map; - - QueryContextMap query_map; - - bool enable_filesystem_query_cache_limit; - - QueryContextPtr getCurrentQueryContext(std::lock_guard & cache_lock); - - QueryContextPtr getQueryContext(const String & query_id, std::lock_guard & cache_lock); - - void removeQueryContext(const String & query_id); - - QueryContextPtr getOrSetQueryContext(const String & query_id, const ReadSettings & settings, std::lock_guard &); - -public: /// Save a query context information, and adopt different cache policies /// for different queries through the context cache layer. struct QueryContextHolder : private boost::noncopyable @@ -206,6 +129,43 @@ public: QueryContextHolder getQueryContextHolder(const String & query_id, const ReadSettings & settings); private: + String cache_base_path; + + size_t max_size; + size_t max_element_size; + size_t max_file_segment_size; + + bool allow_persistent_files; + size_t enable_cache_hits_threshold; + bool enable_filesystem_query_cache_limit; + + Poco::Logger * log; + bool is_initialized = false; + + mutable std::mutex mutex; + + bool tryReserve(const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock); + + void remove( + Key key, + size_t offset, + std::lock_guard & cache_lock, + std::lock_guard & segment_lock); + + bool isLastFileSegmentHolder( + const Key & key, + size_t offset, + std::lock_guard & cache_lock, + std::lock_guard & segment_lock); + + void reduceSizeToDownloaded( + const Key & key, + size_t offset, + std::lock_guard & cache_lock, + std::lock_guard & segment_lock); + + void assertInitialized() const; + struct FileSegmentCell : private boost::noncopyable { FileSegmentPtr file_segment; @@ -223,24 +183,30 @@ private: FileSegmentCell(FileSegmentPtr file_segment_, FileCache * cache, std::lock_guard & cache_lock); FileSegmentCell(FileSegmentCell && other) noexcept - : file_segment(std::move(other.file_segment)), queue_iterator(other.queue_iterator) + : file_segment(std::move(other.file_segment)), queue_iterator(std::move(other.queue_iterator)) {} + }; + + using AccessKeyAndOffset = std::pair; + struct KeyAndOffsetHash + { + std::size_t operator()(const AccessKeyAndOffset & key) const { + return std::hash()(key.first.key) ^ std::hash()(key.second); } }; using FileSegmentsByOffset = std::map; using CachedFiles = std::unordered_map; + using FileCacheRecords = std::unordered_map; CachedFiles files; std::unique_ptr main_priority; FileCacheRecords stash_records; std::unique_ptr stash_priority; - size_t max_stash_element_size; - size_t 
enable_cache_hits_threshold; - Poco::Logger * log; + void loadCacheInfoIntoMemory(std::lock_guard & cache_lock); FileSegments getImpl(const Key & key, const FileSegment::Range & range, std::lock_guard & cache_lock); @@ -257,11 +223,11 @@ private: void useCell(const FileSegmentCell & cell, FileSegments & result, std::lock_guard & cache_lock) const; bool tryReserveForMainList( - const Key & key, size_t offset, size_t size, QueryContextPtr query_context, std::lock_guard & cache_lock); - - size_t getAvailableCacheSize() const; - - void loadCacheInfoIntoMemory(std::lock_guard & cache_lock); + const Key & key, + size_t offset, + size_t size, + QueryContextPtr query_context, + std::lock_guard & cache_lock); FileSegments splitRangeIntoCells( const Key & key, @@ -289,6 +255,48 @@ private: void assertCacheCellsCorrectness(const FileSegmentsByOffset & cells_by_offset, std::lock_guard & cache_lock); + /// Used to track and control the cache access of each query. + /// Through it, we can realize the processing of different queries by the cache layer. + struct QueryContext + { + FileCacheRecords records; + FileCachePriorityPtr priority; + + size_t cache_size = 0; + size_t max_cache_size; + + bool skip_download_if_exceeds_query_cache; + + QueryContext(size_t max_cache_size_, bool skip_download_if_exceeds_query_cache_) + : max_cache_size(max_cache_size_) + , skip_download_if_exceeds_query_cache(skip_download_if_exceeds_query_cache_) {} + + size_t getMaxCacheSize() const { return max_cache_size; } + + size_t getCacheSize() const { return cache_size; } + + FileCachePriorityPtr getPriority() const { return priority; } + + bool isSkipDownloadIfExceed() const { return skip_download_if_exceeds_query_cache; } + + void remove(const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock); + + void reserve(const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock); + + void use(const Key & key, size_t offset, std::lock_guard & cache_lock); + }; + + using QueryContextMap = std::unordered_map; + QueryContextMap query_map; + + QueryContextPtr getCurrentQueryContext(std::lock_guard & cache_lock); + + QueryContextPtr getQueryContext(const String & query_id, std::lock_guard & cache_lock); + + void removeQueryContext(const String & query_id); + + QueryContextPtr getOrSetQueryContext(const String & query_id, const ReadSettings & settings, std::lock_guard &); + public: void assertCacheCorrectness(const Key & key, std::lock_guard & cache_lock); diff --git a/src/Common/FileSegment.cpp b/src/Common/FileSegment.cpp index c2a12b38320..9252d27f754 100644 --- a/src/Common/FileSegment.cpp +++ b/src/Common/FileSegment.cpp @@ -55,6 +55,7 @@ FileSegment::FileSegment( case (State::DOWNLOADED): { reserved_size = downloaded_size = size_; + is_downloaded = true; break; } case (State::SKIP_CACHE): @@ -430,19 +431,20 @@ void FileSegment::completeBatchAndResetDownloader() cv.notify_all(); } -void FileSegment::completeWithState(State state, bool auto_resize) +void FileSegment::completeWithState(State state) { std::lock_guard cache_lock(cache->mutex); std::lock_guard segment_lock(mutex); assertNotDetached(segment_lock); - bool is_downloader = isDownloaderImpl(segment_lock); - if (!is_downloader) + auto caller_id = getCallerId(); + if (caller_id != downloader_id) { - cv.notify_all(); - throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, - "File segment can be completed only by downloader or downloader's FileSegmentsHodler"); + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "File segment completion can be done 
only by downloader. (CallerId: {}, downloader id: {}", + caller_id, downloader_id); } if (state != State::DOWNLOADED @@ -450,140 +452,48 @@ void FileSegment::completeWithState(State state, bool auto_resize) && state != State::PARTIALLY_DOWNLOADED_NO_CONTINUATION) { cv.notify_all(); - throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, - "Cannot complete file segment with state: {}", stateToString(state)); - } - - if (state == State::DOWNLOADED) - { - if (auto_resize && downloaded_size != range().size()) - { - LOG_TEST(log, "Resize cell {} to downloaded: {}", range().toString(), downloaded_size); - assert(downloaded_size <= range().size()); - segment_range = Range(segment_range.left, segment_range.left + downloaded_size - 1); - } - - /// Update states and finalize cache write buffer. - setDownloaded(segment_lock); - - if (downloaded_size != range().size()) - throw Exception( - ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, - "Cannot complete file segment as DOWNLOADED, because downloaded size ({}) does not match expected size ({})", - downloaded_size, range().size()); + throw Exception( + ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, + "Cannot complete file segment with state: {}", stateToString(state)); } download_state = state; - - try - { - completeImpl(cache_lock, segment_lock); - } - catch (...) - { - if (!downloader_id.empty() && is_downloader) - downloader_id.clear(); - - cv.notify_all(); - throw; - } - - cv.notify_all(); + completeBasedOnCurrentState(cache_lock, segment_lock); } -void FileSegment::completeBasedOnCurrentState(std::lock_guard & cache_lock) +void FileSegment::completeWithoutState(std::lock_guard & cache_lock) { std::lock_guard segment_lock(mutex); + completeBasedOnCurrentState(cache_lock, segment_lock); +} +void FileSegment::completeBasedOnCurrentState(std::lock_guard & cache_lock, std::lock_guard & segment_lock) +{ if (is_detached) return; - assertNotDetached(segment_lock); - - completeBasedOnCurrentStateUnlocked(cache_lock, segment_lock); -} - -void FileSegment::completeBasedOnCurrentStateUnlocked( - std::lock_guard & cache_lock, std::lock_guard & segment_lock) -{ + bool is_downloader = isDownloaderImpl(segment_lock); bool is_last_holder = cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock); + bool can_update_segment_state = is_downloader || is_last_holder; + size_t current_downloaded_size = getDownloadedSize(segment_lock); - if (is_last_holder && download_state == State::SKIP_CACHE) - { - cache->remove(key(), offset(), cache_lock, segment_lock); - return; - } - - if (download_state == State::SKIP_CACHE || is_detached) - return; - - if (isDownloaderImpl(segment_lock) - && download_state != State::DOWNLOADED - && getDownloadedSize(segment_lock) == range().size()) - { - setDownloaded(segment_lock); - } - - assertNotDetached(segment_lock); - - if (download_state == State::DOWNLOADING || download_state == State::EMPTY) - { - /// Segment state can be changed from DOWNLOADING or EMPTY only if the caller is the - /// downloader or the only owner of the segment. - - bool can_update_segment_state = isDownloaderImpl(segment_lock) || is_last_holder; - - if (can_update_segment_state) - download_state = State::PARTIALLY_DOWNLOADED; - } - - try - { - completeImpl(cache_lock, segment_lock); - } - catch (...) 
- { - if (!downloader_id.empty() && isDownloaderImpl(segment_lock)) - downloader_id.clear(); - - cv.notify_all(); - throw; - } - - cv.notify_all(); -} - -void FileSegment::completeImpl(std::lock_guard & cache_lock, std::lock_guard & segment_lock) -{ - bool is_last_holder = cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock); - - if (is_last_holder - && (download_state == State::PARTIALLY_DOWNLOADED || download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION)) - { - size_t current_downloaded_size = getDownloadedSize(segment_lock); - if (current_downloaded_size == 0) + SCOPE_EXIT({ + if (is_downloader) { - download_state = State::SKIP_CACHE; - LOG_TEST(log, "Remove cell {} (nothing downloaded)", range().toString()); - cache->remove(key(), offset(), cache_lock, segment_lock); + cv.notify_all(); } + }); + + LOG_TEST(log, "Complete without state (is_last_holder: {}). File segment info: {}", is_last_holder, getInfoForLogImpl(segment_lock)); + + if (can_update_segment_state) + { + if (current_downloaded_size == range().size()) + setDownloaded(segment_lock); else - { - /** - * Only last holder of current file segment can resize the cell, - * because there is an invariant that file segments returned to users - * in FileSegmentsHolder represent a contiguous range, so we can resize - * it only when nobody needs it. - */ - download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION; - /// Resize this file segment by creating a copy file segment with DOWNLOADED state, - /// but current file segment should remain PARRTIALLY_DOWNLOADED_NO_CONTINUATION and with detached state, - /// because otherwise an invariant that getOrSet() returns a contiguous range of file segments will be broken - /// (this will be crucial for other file segment holder, not for current one). - cache->reduceSizeToDownloaded(key(), offset(), cache_lock, segment_lock); - } + download_state = State::PARTIALLY_DOWNLOADED; - markAsDetached(segment_lock); + resetDownloaderImpl(segment_lock); if (cache_writer) { @@ -593,10 +503,62 @@ void FileSegment::completeImpl(std::lock_guard & cache_lock, std::lo } } - if (!downloader_id.empty() && (isDownloaderImpl(segment_lock) || is_last_holder)) + switch (download_state) { - LOG_TEST(log, "Clearing downloader id: {}, current state: {}", downloader_id, stateToString(download_state)); - downloader_id.clear(); + case State::SKIP_CACHE: + { + if (is_last_holder) + cache->remove(key(), offset(), cache_lock, segment_lock); + + return; + } + case State::DOWNLOADED: + { + assert(downloaded_size == range().size()); + assert(is_downloaded); + break; + } + case State::DOWNLOADING: + case State::EMPTY: + { + assert(!is_last_holder); + break; + } + case State::PARTIALLY_DOWNLOADED: + case State::PARTIALLY_DOWNLOADED_NO_CONTINUATION: + { + if (is_last_holder) + { + if (current_downloaded_size == 0) + { + LOG_TEST(log, "Remove cell {} (nothing downloaded)", range().toString()); + + download_state = State::SKIP_CACHE; + cache->remove(key(), offset(), cache_lock, segment_lock); + } + else + { + LOG_TEST(log, "Resize cell {} to downloaded: {}", range().toString(), current_downloaded_size); + + /** + * Only last holder of current file segment can resize the cell, + * because there is an invariant that file segments returned to users + * in FileSegmentsHolder represent a contiguous range, so we can resize + * it only when nobody needs it. 
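A note on the SCOPE_EXIT earlier in this rewritten function: it replaces the duplicated try/catch blocks by notifying waiters on every exit path, normal or exceptional. A minimal hand-rolled equivalent of the idiom (ClickHouse's actual SCOPE_EXIT macro is more general; this is only a sketch):

#include <condition_variable>

// Minimal scope guard: runs the callback when the enclosing scope exits,
// whether normally or via exception -- the effect SCOPE_EXIT achieves above.
template <typename F>
struct ScopeGuard
{
    F fn;
    ~ScopeGuard() { fn(); }
};
template <typename F>
ScopeGuard(F) -> ScopeGuard<F>;

void complete(std::condition_variable & cv, bool is_downloader)
{
    ScopeGuard guard{[&] { if (is_downloader) cv.notify_all(); }};
    // ... state transitions that may throw; waiters are notified regardless.
}

int main()
{
    std::condition_variable cv;
    complete(cv, true);
}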
+ */ + download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION; + + /// Resize this file segment by creating a copy file segment with DOWNLOADED state, + /// but current file segment should remain PARTIALLY_DOWNLOADED_NO_CONTINUATION and with detached state, + /// because otherwise an invariant that getOrSet() returns a contiguous range of file segments will be broken + /// (this will be crucial for other file segment holders, not for the current one). + cache->reduceSizeToDownloaded(key(), offset(), cache_lock, segment_lock); + } + + markAsDetached(segment_lock); + } + break; + } } LOG_TEST(log, "Completed file segment: {}", getInfoForLogImpl(segment_lock)); @@ -613,6 +575,7 @@ String FileSegment::getInfoForLogImpl(std::lock_guard<std::mutex> & segment_lock { WriteBufferFromOwnString info; info << "File segment: " << range().toString() << ", "; + info << "key: " << key().toString() << ", "; info << "state: " << download_state << ", "; info << "downloaded size: " << getDownloadedSize(segment_lock) << ", "; info << "reserved size: " << reserved_size << ", "; @@ -699,6 +662,8 @@ void FileSegment::assertDetachedStatus(std::lock_guard<std::mutex> & segment_lock FileSegmentPtr FileSegment::getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard<std::mutex> & /* cache_lock */) { + std::lock_guard segment_lock(file_segment->mutex); + auto snapshot = std::make_shared<FileSegment>( file_segment->offset(), file_segment->range().size(), @@ -708,8 +673,8 @@ FileSegmentPtr FileSegment::getSnapshot(const FileSegmentPtr & file_segment, std snapshot->hits_count = file_segment->getHitsCount(); snapshot->ref_count = file_segment.use_count(); - snapshot->downloaded_size = file_segment->getDownloadedSize(); - snapshot->download_state = file_segment->state(); + snapshot->downloaded_size = file_segment->getDownloadedSize(segment_lock); + snapshot->download_state = file_segment->download_state; snapshot->is_persistent = file_segment->isPersistent(); return snapshot; @@ -738,7 +703,7 @@ void FileSegment::detach( download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION; downloader_id.clear(); - LOG_TEST(log, "Detached file segment: {}", getInfoForLogImpl(segment_lock)); + LOG_DEBUG(log, "Detached file segment: {}", getInfoForLogImpl(segment_lock)); } void FileSegment::markAsDetached(std::lock_guard<std::mutex> & /* segment_lock */) @@ -793,7 +758,7 @@ FileSegmentsHolder::~FileSegmentsHolder() /// under the same mutex, because complete() checks for segment pointers. std::lock_guard cache_lock(cache->mutex); - file_segment->completeBasedOnCurrentState(cache_lock); + file_segment->completeWithoutState(cache_lock); file_segment_it = file_segments.erase(current_file_segment_it); } @@ -855,17 +820,34 @@ void FileSegmentRangeWriter::completeFileSegment(FileSegment & file_segment) if (file_segment.getDownloadedSize() > 0) { - /// file_segment->complete(DOWNLOADED) is not enough, because file segment capacity - /// was initially set with a margin as `max_file_segment_size`. => We need to always - /// resize to actual size after download finished. file_segment.getOrSetDownloader(); - file_segment.completeWithState(FileSegment::State::DOWNLOADED, /* auto_resize */true); + + { + /// file_segment->complete(DOWNLOADED) is not enough, because file segment capacity + /// was initially set with a margin as `max_file_segment_size`. => We need to always + /// resize to actual size after download finished. + + /// Current file segment is downloaded as a part of write-through cache + /// and therefore cannot be concurrently accessed.
Nevertheless, it can be + /// accessed by cache system tables if someone read from them, + /// therefore we need a mutex. + std::lock_guard segment_lock(file_segment.mutex); + + assert(file_segment.downloaded_size <= file_segment.range().size()); + file_segment.segment_range = FileSegment::Range( + file_segment.segment_range.left, + file_segment.segment_range.left + file_segment.downloaded_size - 1); + file_segment.reserved_size = file_segment.downloaded_size; + } + + file_segment.completeWithState(FileSegment::State::DOWNLOADED); + on_complete_file_segment_func(file_segment); } else { std::lock_guard cache_lock(cache->mutex); - file_segment.completeBasedOnCurrentState(cache_lock); + file_segment.completeWithoutState(cache_lock); } } diff --git a/src/Common/FileSegment.h b/src/Common/FileSegment.h index 4168cf8bd03..2b25cfd172e 100644 --- a/src/Common/FileSegment.h +++ b/src/Common/FileSegment.h @@ -142,7 +142,7 @@ public: void completeBatchAndResetDownloader(); - void completeWithState(State state, bool auto_resize = false); + void completeWithState(State state); String getInfoForLog() const; @@ -195,12 +195,8 @@ private: /// FileSegmentsHolder. complete() might check if the caller of the method /// is the last alive holder of the segment. Therefore, complete() and destruction /// of the file segment pointer must be done under the same cache mutex. - void completeBasedOnCurrentState(std::lock_guard & cache_lock); - void completeBasedOnCurrentStateUnlocked(std::lock_guard & cache_lock, std::lock_guard & segment_lock); - - void completeImpl( - std::lock_guard & cache_lock, - std::lock_guard & segment_lock); + void completeBasedOnCurrentState(std::lock_guard & cache_lock, std::lock_guard & segment_lock); + void completeWithoutState(std::lock_guard & cache_lock); void resetDownloaderImpl(std::lock_guard & segment_lock); diff --git a/src/Common/LRUFileCachePriority.cpp b/src/Common/LRUFileCachePriority.cpp index 91addc92501..c4d6313e4d6 100644 --- a/src/Common/LRUFileCachePriority.cpp +++ b/src/Common/LRUFileCachePriority.cpp @@ -24,9 +24,7 @@ IFileCachePriority::WriteIterator LRUFileCachePriority::add(const Key & key, siz throw Exception( ErrorCodes::LOGICAL_ERROR, "Attempt to add duplicate queue entry to queue. 
(Key: {}, offset: {}, size: {})", - entry.key.toString(), - entry.offset, - entry.size); + entry.key.toString(), entry.offset, entry.size); } #endif @@ -36,6 +34,8 @@ IFileCachePriority::WriteIterator LRUFileCachePriority::add(const Key & key, siz CurrentMetrics::add(CurrentMetrics::FilesystemCacheSize, size); CurrentMetrics::add(CurrentMetrics::FilesystemCacheElements); + LOG_DEBUG(log, "Added entry into LRU queue, key: {}, offset: {}", key.toString(), offset); + return std::make_shared(this, iter); } @@ -54,6 +54,8 @@ void LRUFileCachePriority::removeAll(std::lock_guard &) CurrentMetrics::sub(CurrentMetrics::FilesystemCacheSize, cache_size); CurrentMetrics::sub(CurrentMetrics::FilesystemCacheElements, queue.size()); + LOG_DEBUG(log, "Removed all entries from LRU queue"); + queue.clear(); cache_size = 0; } @@ -86,6 +88,8 @@ void LRUFileCachePriority::LRUFileCacheIterator::removeAndGetNext(std::lock_guar CurrentMetrics::sub(CurrentMetrics::FilesystemCacheSize, queue_iter->size); CurrentMetrics::sub(CurrentMetrics::FilesystemCacheElements); + LOG_DEBUG(cache_priority->log, "Removed entry from LRU queue, key: {}, offset: {}", queue_iter->key.toString(), queue_iter->offset); + queue_iter = cache_priority->queue.erase(queue_iter); } diff --git a/src/Common/LRUFileCachePriority.h b/src/Common/LRUFileCachePriority.h index 7ea35e9a5eb..2cdcc981e04 100644 --- a/src/Common/LRUFileCachePriority.h +++ b/src/Common/LRUFileCachePriority.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB { @@ -32,6 +33,7 @@ public: private: LRUQueue queue; + Poco::Logger * log = &Poco::Logger::get("LRUFileCachePriority"); }; class LRUFileCachePriority::LRUFileCacheIterator : public IFileCachePriority::IIterator diff --git a/src/Common/PipeFDs.cpp b/src/Common/PipeFDs.cpp index bea7546a8cd..4a5bcacd0de 100644 --- a/src/Common/PipeFDs.cpp +++ b/src/Common/PipeFDs.cpp @@ -106,7 +106,7 @@ void LazyPipeFDs::tryIncreaseSize(int desired_size) { if (errno == EINVAL) { - LOG_INFO(log, "Cannot get pipe capacity, {}. Very old Linux kernels have no support for this fcntl.", errnoToString(ErrorCodes::CANNOT_FCNTL)); + LOG_INFO(log, "Cannot get pipe capacity, {}. Very old Linux kernels have no support for this fcntl.", errnoToString()); /// It will work nevertheless. 
} else diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index e5275be43c1..297b3bdb59d 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -31,30 +31,30 @@ M(ReadCompressedBytes, "Number of bytes (the number of bytes before decompression) read from compressed sources (files, network).") \ M(CompressedReadBufferBlocks, "Number of compressed blocks (the blocks of data that are compressed independently of each other) read from compressed sources (files, network).") \ M(CompressedReadBufferBytes, "Number of uncompressed bytes (the number of bytes after decompression) read from compressed sources (files, network).") \ - M(UncompressedCacheHits, "") \ - M(UncompressedCacheMisses, "") \ - M(UncompressedCacheWeightLost, "") \ - M(MMappedFileCacheHits, "") \ - M(MMappedFileCacheMisses, "") \ - M(OpenedFileCacheHits, "") \ - M(OpenedFileCacheMisses, "") \ + M(UncompressedCacheHits, "Number of times a block of data has been found in the uncompressed cache (and decompression was avoided).") \ + M(UncompressedCacheMisses, "Number of times a block of data has not been found in the uncompressed cache (and required decompression).") \ + M(UncompressedCacheWeightLost, "Number of bytes evicted from the uncompressed cache.") \ + M(MMappedFileCacheHits, "Number of times a file has been found in the MMap cache (for the 'mmap' read_method), so we didn't have to mmap it again.") \ + M(MMappedFileCacheMisses, "Number of times a file has not been found in the MMap cache (for the 'mmap' read_method), so we had to mmap it again.") \ + M(OpenedFileCacheHits, "Number of times a file has been found in the opened file cache, so we didn't have to open it again.") \ + M(OpenedFileCacheMisses, "Number of times a file has not been found in the opened file cache, so we had to open it again.") \ M(AIOWrite, "Number of writes with Linux or FreeBSD AIO interface") \ M(AIOWriteBytes, "Number of bytes written with Linux or FreeBSD AIO interface") \ M(AIORead, "Number of reads with Linux or FreeBSD AIO interface") \ M(AIOReadBytes, "Number of bytes read with Linux or FreeBSD AIO interface") \ - M(IOBufferAllocs, "") \ - M(IOBufferAllocBytes, "") \ - M(ArenaAllocChunks, "") \ - M(ArenaAllocBytes, "") \ - M(FunctionExecute, "") \ - M(TableFunctionExecute, "") \ - M(MarkCacheHits, "") \ - M(MarkCacheMisses, "") \ - M(CreatedReadBufferOrdinary, "") \ - M(CreatedReadBufferDirectIO, "") \ - M(CreatedReadBufferDirectIOFailed, "") \ - M(CreatedReadBufferMMap, "") \ - M(CreatedReadBufferMMapFailed, "") \ + M(IOBufferAllocs, "Number of allocations of IO buffers (for ReadBuffer/WriteBuffer).") \ + M(IOBufferAllocBytes, "Number of bytes allocated for IO buffers (for ReadBuffer/WriteBuffer).") \ + M(ArenaAllocChunks, "Number of chunks allocated for memory Arena (used for GROUP BY and similar operations).") \ + M(ArenaAllocBytes, "Number of bytes allocated for memory Arena (used for GROUP BY and similar operations).") \ + M(FunctionExecute, "Number of SQL ordinary function calls (SQL functions are called on per-block basis, so this number represents the number of blocks).") \ + M(TableFunctionExecute, "Number of table function calls.") \ + M(MarkCacheHits, "Number of times an entry has been found in the mark cache, so we didn't have to load a mark file.") \ + M(MarkCacheMisses, "Number of times an entry has not been found in the mark cache, so we had to load a mark file in memory, which is a costly operation, adding to query latency.") \ + M(CreatedReadBufferOrdinary, "Number of times an ordinary read buffer was created for reading data (while choosing among other read methods).") \ + M(CreatedReadBufferDirectIO, "Number of times a read buffer with O_DIRECT was created for reading data (while choosing among other read methods).") \ + M(CreatedReadBufferDirectIOFailed, "Number of times a read buffer with O_DIRECT was attempted to be created for reading data (while choosing among other read methods), but the OS did not allow it (due to lack of filesystem support or other reasons) and we fell back to the ordinary reading method.") \ + M(CreatedReadBufferMMap, "Number of times a read buffer using 'mmap' was created for reading data (while choosing among other read methods).") \ + M(CreatedReadBufferMMapFailed, "Number of times a read buffer with 'mmap' was attempted to be created for reading data (while choosing among other read methods), but the OS did not allow it (due to lack of filesystem support or other reasons) and we fell back to the ordinary reading method.") \ M(DiskReadElapsedMicroseconds, "Total time spent waiting for read syscall. This includes reads from page cache.") \ M(DiskWriteElapsedMicroseconds, "Total time spent waiting for write syscall. This includes writes to page cache.") \ M(NetworkReceiveElapsedMicroseconds, "Total time spent waiting for data to receive or receiving data from network. Only ClickHouse-related network interaction is included, not by 3rd party libraries.") \ @@ -67,12 +67,12 @@ \ M(ReplicatedPartFetches, "Number of times a data part was downloaded from replica of a ReplicatedMergeTree table.") \ M(ReplicatedPartFailedFetches, "Number of times a data part failed to download from a replica of a ReplicatedMergeTree table.") \ - M(ObsoleteReplicatedParts, "") \ + M(ObsoleteReplicatedParts, "Number of times a data part was covered by another data part that has been fetched from a replica (so, we have marked a covered data part as obsolete and no longer needed).") \ M(ReplicatedPartMerges, "Number of times data parts of ReplicatedMergeTree tables were successfully merged.") \ M(ReplicatedPartFetchesOfMerged, "Number of times we prefer to download already merged part from replica of ReplicatedMergeTree table instead of performing a merge ourself (usually we prefer doing a merge ourself to save network traffic). This happens when we do not have all source parts to perform a merge or when the data part is old enough.") \ - M(ReplicatedPartMutations, "") \ - M(ReplicatedPartChecks, "") \ - M(ReplicatedPartChecksFailed, "") \ + M(ReplicatedPartMutations, "Number of times data parts of ReplicatedMergeTree tables were successfully mutated.") \ + M(ReplicatedPartChecks, "Number of times we had to perform advanced search for a data part on replicas or to clarify the need of an existing data part.") \ + M(ReplicatedPartChecksFailed, "Number of times the advanced search for a data part on replicas did not give a result or when an unexpected part has been found and moved away.") \ M(ReplicatedDataLoss, "Number of times a data part that we wanted doesn't exist on any replica (even on replicas that are offline right now). Those data parts are definitely lost.
This is normal due to asynchronous replication (if quorum inserts were not enabled), when the replica on which the data part was written was failed and when it became online after fail it doesn't contain that data part.") \ \ M(InsertedRows, "Number of rows INSERTed to all tables.") \ @@ -85,30 +85,30 @@ M(DistributedDelayedInsertsMilliseconds, "Total number of milliseconds spent while the INSERT of a block to a Distributed table was throttled due to high number of pending bytes.") \ M(DuplicatedInsertedBlocks, "Number of times the INSERTed block to a ReplicatedMergeTree table was deduplicated.") \ \ - M(ZooKeeperInit, "") \ - M(ZooKeeperTransactions, "") \ - M(ZooKeeperList, "") \ - M(ZooKeeperCreate, "") \ - M(ZooKeeperRemove, "") \ - M(ZooKeeperExists, "") \ - M(ZooKeeperGet, "") \ - M(ZooKeeperSet, "") \ - M(ZooKeeperMulti, "") \ - M(ZooKeeperCheck, "") \ - M(ZooKeeperSync, "") \ - M(ZooKeeperClose, "") \ - M(ZooKeeperWatchResponse, "") \ - M(ZooKeeperUserExceptions, "") \ - M(ZooKeeperHardwareExceptions, "") \ - M(ZooKeeperOtherExceptions, "") \ - M(ZooKeeperWaitMicroseconds, "") \ - M(ZooKeeperBytesSent, "") \ - M(ZooKeeperBytesReceived, "") \ + M(ZooKeeperInit, "Number of times connection with ZooKeeper has been established.") \ + M(ZooKeeperTransactions, "Number of ZooKeeper operations, which include both read and write operations as well as multi-transactions.") \ + M(ZooKeeperList, "Number of 'list' (getChildren) requests to ZooKeeper.") \ + M(ZooKeeperCreate, "Number of 'create' requests to ZooKeeper.") \ + M(ZooKeeperRemove, "Number of 'remove' requests to ZooKeeper.") \ + M(ZooKeeperExists, "Number of 'exists' requests to ZooKeeper.") \ + M(ZooKeeperGet, "Number of 'get' requests to ZooKeeper.") \ + M(ZooKeeperSet, "Number of 'set' requests to ZooKeeper.") \ + M(ZooKeeperMulti, "Number of 'multi' requests to ZooKeeper (compound transactions).") \ + M(ZooKeeperCheck, "Number of 'check' requests to ZooKeeper. Usually they don't make sense in isolation, only as part of a complex transaction.") \ + M(ZooKeeperSync, "Number of 'sync' requests to ZooKeeper. 
These requests are rarely needed or usable.") \ + M(ZooKeeperClose, "Number of times connection with ZooKeeper has been closed voluntary.") \ + M(ZooKeeperWatchResponse, "Number of times watch notification has been received from ZooKeeper.") \ + M(ZooKeeperUserExceptions, "Number of exceptions while working with ZooKeeper related to the data (no node, bad version or similar).") \ + M(ZooKeeperHardwareExceptions, "Number of exceptions while working with ZooKeeper related to network (connection loss or similar).") \ + M(ZooKeeperOtherExceptions, "Number of exceptions while working with ZooKeeper other than ZooKeeperUserExceptions and ZooKeeperHardwareExceptions.") \ + M(ZooKeeperWaitMicroseconds, "Number of microseconds spent waiting for responses from ZooKeeper after creating a request, summed across all the requesting threads.") \ + M(ZooKeeperBytesSent, "Number of bytes send over network while communicating with ZooKeeper.") \ + M(ZooKeeperBytesReceived, "Number of bytes received over network while communicating with ZooKeeper.") \ \ - M(DistributedConnectionFailTry, "Total count when distributed connection fails with retry") \ - M(DistributedConnectionMissingTable, "") \ - M(DistributedConnectionStaleReplica, "") \ - M(DistributedConnectionFailAtAll, "Total count when distributed connection fails after all retries finished") \ + M(DistributedConnectionFailTry, "Total count when distributed connection fails with retry.") \ + M(DistributedConnectionMissingTable, "Number of times we rejected a replica from a distributed query, because it did not contain a table needed for the query.") \ + M(DistributedConnectionStaleReplica, "Number of times we rejected a replica from a distributed query, because some table needed for a query had replication lag higher than the configured threshold.") \ + M(DistributedConnectionFailAtAll, "Total count when distributed connection fails after all retries finished.") \ \ M(HedgedRequestsChangeReplica, "Total count when timeout for changing replica expired in hedged requests.") \ \ @@ -119,12 +119,21 @@ \ M(ExecuteShellCommand, "Number of shell command executions.") \ \ - M(ExternalSortWritePart, "") \ - M(ExternalSortMerge, "") \ - M(ExternalAggregationWritePart, "") \ - M(ExternalAggregationMerge, "") \ - M(ExternalAggregationCompressedBytes, "") \ - M(ExternalAggregationUncompressedBytes, "") \ + M(ExternalProcessingCompressedBytesTotal, "Number of compressed bytes written by external processing (sorting/aggragating/joining)") \ + M(ExternalProcessingUncompressedBytesTotal, "Amount of data (uncompressed, before compression) written by external processing (sorting/aggragating/joining)") \ + M(ExternalProcessingFilesTotal, "Number of files used by external processing (sorting/aggragating/joining)") \ + M(ExternalSortWritePart, "Number of times a temporary file was written to disk for sorting in external memory.") \ + M(ExternalSortMerge, "Number of times temporary files were merged for sorting in external memory.") \ + M(ExternalSortCompressedBytes, "Number of compressed bytes written for sorting in external memory.") \ + M(ExternalSortUncompressedBytes, "Amount of data (uncompressed, before compression) written for sorting in external memory.") \ + M(ExternalAggregationWritePart, "Number of times a temporary file was written to disk for aggregation in external memory.") \ + M(ExternalAggregationMerge, "Number of times temporary files were merged for aggregation in external memory.") \ + M(ExternalAggregationCompressedBytes, "Number of bytes written to disk 
for aggregation in external memory.") \ + M(ExternalAggregationUncompressedBytes, "Amount of data (uncompressed, before compression) written to disk for aggregation in external memory.") \ + M(ExternalJoinWritePart, "Number of times a temporary file was written to disk for JOIN in external memory.") \ + M(ExternalJoinMerge, "Number of times temporary files were merged for JOIN in external memory.") \ + M(ExternalJoinCompressedBytes, "Number of compressed bytes written for JOIN in external memory.") \ + M(ExternalJoinUncompressedBytes, "Amount of data (uncompressed, before compression) written for JOIN in external memory.") \ \ M(SlowRead, "Number of reads from a file that were slow. This indicate system overload. Thresholds are controlled by read_backoff_* settings.") \ M(ReadBackoff, "Number of times the number of query processing threads was lowered due to slow reads.") \ @@ -166,46 +175,59 @@ M(RegexpCreated, "Compiled regular expressions. Identical regular expressions compiled just once and cached forever.") \ M(ContextLock, "Number of times the lock of Context was acquired or tried to acquire. This is global lock.") \ \ - M(StorageBufferFlush, "") \ - M(StorageBufferErrorOnFlush, "") \ - M(StorageBufferPassedAllMinThresholds, "") \ - M(StorageBufferPassedTimeMaxThreshold, "") \ - M(StorageBufferPassedRowsMaxThreshold, "") \ - M(StorageBufferPassedBytesMaxThreshold, "") \ - M(StorageBufferPassedTimeFlushThreshold, "") \ - M(StorageBufferPassedRowsFlushThreshold, "") \ - M(StorageBufferPassedBytesFlushThreshold, "") \ - M(StorageBufferLayerLockReadersWaitMilliseconds, "Time for waiting for Buffer layer during reading") \ - M(StorageBufferLayerLockWritersWaitMilliseconds, "Time for waiting free Buffer layer to write to (can be used to tune Buffer layers)") \ + M(StorageBufferFlush, "Number of times a buffer in a 'Buffer' table was flushed.") \ + M(StorageBufferErrorOnFlush, "Number of times a buffer in the 'Buffer' table has not been able to flush due to error writing in the destination table.") \ + M(StorageBufferPassedAllMinThresholds, "Number of times a criteria on min thresholds has been reached to flush a buffer in a 'Buffer' table.") \ + M(StorageBufferPassedTimeMaxThreshold, "Number of times a criteria on max time threshold has been reached to flush a buffer in a 'Buffer' table.") \ + M(StorageBufferPassedRowsMaxThreshold, "Number of times a criteria on max rows threshold has been reached to flush a buffer in a 'Buffer' table.") \ + M(StorageBufferPassedBytesMaxThreshold, "Number of times a criteria on max bytes threshold has been reached to flush a buffer in a 'Buffer' table.") \ + M(StorageBufferPassedTimeFlushThreshold, "Number of times background-only flush threshold on time has been reached to flush a buffer in a 'Buffer' table. This is expert-only metric. If you read this and you are not an expert, stop reading.") \ + M(StorageBufferPassedRowsFlushThreshold, "Number of times background-only flush threshold on rows has been reached to flush a buffer in a 'Buffer' table. This is expert-only metric. If you read this and you are not an expert, stop reading.") \ + M(StorageBufferPassedBytesFlushThreshold, "Number of times background-only flush threshold on bytes has been reached to flush a buffer in a 'Buffer' table. This is expert-only metric. 
If you read this and you are not an expert, stop reading.") \ + M(StorageBufferLayerLockReadersWaitMilliseconds, "Time for waiting for Buffer layer during reading.") \ + M(StorageBufferLayerLockWritersWaitMilliseconds, "Time for waiting free Buffer layer to write to (can be used to tune Buffer layers).") \ \ - M(DictCacheKeysRequested, "") \ - M(DictCacheKeysRequestedMiss, "") \ - M(DictCacheKeysRequestedFound, "") \ - M(DictCacheKeysExpired, "") \ - M(DictCacheKeysNotFound, "") \ - M(DictCacheKeysHit, "") \ - M(DictCacheRequestTimeNs, "") \ - M(DictCacheRequests, "") \ - M(DictCacheLockWriteNs, "") \ - M(DictCacheLockReadNs, "") \ + M(DictCacheKeysRequested, "Number of keys requested from the data source for the dictionaries of 'cache' types.") \ + M(DictCacheKeysRequestedMiss, "Number of keys requested from the data source for dictionaries of 'cache' types but not found in the data source.") \ + M(DictCacheKeysRequestedFound, "Number of keys requested from the data source for dictionaries of 'cache' types and found in the data source.") \ + M(DictCacheKeysExpired, "Number of keys looked up in the dictionaries of 'cache' types and found in the cache but they were obsolete.") \ + M(DictCacheKeysNotFound, "Number of keys looked up in the dictionaries of 'cache' types and not found.") \ + M(DictCacheKeysHit, "Number of keys looked up in the dictionaries of 'cache' types and found in the cache.") \ + M(DictCacheRequestTimeNs, "Number of nanoseconds spend in querying the external data sources for the dictionaries of 'cache' types.") \ + M(DictCacheRequests, "Number of bulk requests to the external data sources for the dictionaries of 'cache' types.") \ + M(DictCacheLockWriteNs, "Number of nanoseconds spend in waiting for write lock to update the data for the dictionaries of 'cache' types.") \ + M(DictCacheLockReadNs, "Number of nanoseconds spend in waiting for read lock to lookup the data for the dictionaries of 'cache' types.") \ \ - M(DistributedSyncInsertionTimeoutExceeded, "") \ - M(DataAfterMergeDiffersFromReplica, "") \ - M(DataAfterMutationDiffersFromReplica, "") \ - M(PolygonsAddedToPool, "") \ - M(PolygonsInPoolAllocatedBytes, "") \ - M(RWLockAcquiredReadLocks, "") \ - M(RWLockAcquiredWriteLocks, "") \ - M(RWLockReadersWaitMilliseconds, "") \ - M(RWLockWritersWaitMilliseconds, "") \ + M(DistributedSyncInsertionTimeoutExceeded, "A timeout has exceeded while waiting for shards during synchronous insertion into a Distributed table (with 'insert_distributed_sync' = 1)") \ + M(DataAfterMergeDiffersFromReplica, R"( +Number of times data after merge is not byte-identical to the data on another replicas. There could be several reasons: +1. Using newer version of compression library after server update. +2. Using another compression method. +3. Non-deterministic compression algorithm (highly unlikely). +4. Non-deterministic merge algorithm due to logical error in code. +5. Data corruption in memory due to bug in code. +6. Data corruption in memory due to hardware issue. +7. Manual modification of source data after server startup. +8. Manual modification of checksums stored in ZooKeeper. +9. Part format related settings like 'enable_mixed_granularity_parts' are different on different replicas. +The server successfully detected this situation and will download merged part from replica to force byte-identical result. +)") \ + M(DataAfterMutationDiffersFromReplica, "Number of times data after mutation is not byte-identical to the data on another replicas. 
+ M(RWLockAcquiredReadLocks, "Number of times a read lock was acquired (in a heavy RWLock).") \
+ M(RWLockAcquiredWriteLocks, "Number of times a write lock was acquired (in a heavy RWLock).") \
+ M(RWLockReadersWaitMilliseconds, "Total time spent waiting for a read lock to be acquired (in a heavy RWLock).") \
+ M(RWLockWritersWaitMilliseconds, "Total time spent waiting for a write lock to be acquired (in a heavy RWLock).") \
M(DNSError, "Total count of errors in DNS resolution") \
\
M(RealTimeMicroseconds, "Total (wall clock) time spent in processing (queries and other tasks) threads (note that this is a sum).") \
M(UserTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in user space. This includes time CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \
M(SystemTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in OS kernel space. This includes time CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \
M(MemoryOvercommitWaitTimeMicroseconds, "Total time spent in waiting for memory to be freed in OvercommitTracker.") \
- M(SoftPageFaults, "") \
- M(HardPageFaults, "") \
+ M(SoftPageFaults, "The number of soft page faults in query execution threads. A soft page fault usually means a miss in the memory allocator cache, which required a new memory mapping from the OS and subsequent allocation of a page of physical memory.") \
+ M(HardPageFaults, "The number of hard page faults in query execution threads. High values indicate either that you forgot to turn off swap on your server, or eviction of memory pages of the ClickHouse binary during very high memory pressure, or successful usage of the 'mmap' read method for the tables data.") \
\
M(OSIOWaitMicroseconds, "Total time a thread spent waiting for a result of IO operation, from the OS point of view.
This is real IO that doesn't include page cache.") \ M(OSCPUWaitMicroseconds, "Total time a thread was ready for execution but waiting to be scheduled by OS, from the OS point of view.") \ diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index 741ae5b5747..7266b9b9553 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -81,7 +81,6 @@ namespace ErrorCodes extern const int CANNOT_SET_SIGNAL_HANDLER; extern const int CANNOT_CREATE_TIMER; extern const int CANNOT_SET_TIMER_PERIOD; - extern const int CANNOT_DELETE_TIMER; extern const int NOT_IMPLEMENTED; } @@ -188,7 +187,7 @@ void QueryProfilerBase::tryCleanup() if (timer_id.has_value()) { if (timer_delete(*timer_id)) - LOG_ERROR(log, "Failed to delete query profiler timer {}", errnoToString(ErrorCodes::CANNOT_DELETE_TIMER)); + LOG_ERROR(log, "Failed to delete query profiler timer {}", errnoToString()); timer_id.reset(); } diff --git a/src/Common/SharedLibrary.cpp b/src/Common/SharedLibrary.cpp index 89c3467fcc8..6104c96676a 100644 --- a/src/Common/SharedLibrary.cpp +++ b/src/Common/SharedLibrary.cpp @@ -17,7 +17,7 @@ SharedLibrary::SharedLibrary(std::string_view path, int flags) { handle = dlopen(path.data(), flags); if (!handle) - throw Exception(ErrorCodes::CANNOT_DLOPEN, "Cannot dlopen: ({})", dlerror()); + throw Exception(ErrorCodes::CANNOT_DLOPEN, "Cannot dlopen: ({})", dlerror()); // NOLINT(concurrency-mt-unsafe) // MT-Safe on Linux, see man dlerror updatePHDRCache(); @@ -33,11 +33,11 @@ SharedLibrary::~SharedLibrary() void * SharedLibrary::getImpl(std::string_view name, bool no_throw) { - dlerror(); + dlerror(); // NOLINT(concurrency-mt-unsafe) // MT-Safe on Linux, see man dlerror auto * res = dlsym(handle, name.data()); - if (char * error = dlerror()) + if (char * error = dlerror()) // NOLINT(concurrency-mt-unsafe) // MT-Safe on Linux, see man dlerror { if (no_throw) return nullptr; diff --git a/src/Common/ShellCommand.cpp b/src/Common/ShellCommand.cpp index 0050288b1cf..17ee1b880b3 100644 --- a/src/Common/ShellCommand.cpp +++ b/src/Common/ShellCommand.cpp @@ -76,7 +76,7 @@ ShellCommand::~ShellCommand() int retcode = kill(pid, SIGTERM); if (retcode != 0) - LOG_WARNING(getLogger(), "Cannot kill shell command pid {} errno '{}'", pid, errnoToString(retcode)); + LOG_WARNING(getLogger(), "Cannot kill shell command pid {} errno '{}'", pid, errnoToString()); } else { @@ -201,8 +201,8 @@ std::unique_ptr ShellCommand::executeImpl( // by the child process, which might not expect this. sigset_t mask; sigemptyset(&mask); - sigprocmask(0, nullptr, &mask); - sigprocmask(SIG_UNBLOCK, &mask, nullptr); + sigprocmask(0, nullptr, &mask); // NOLINT(concurrency-mt-unsafe) + sigprocmask(SIG_UNBLOCK, &mask, nullptr); // NOLINT(concurrency-mt-unsafe) execv(filename, argv); /// If the process is running, then `execv` does not return here. 
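A recurring change in the chunks above and below replaces errnoToString(some_code) and errnoToString(errno) with a zero-argument errnoToString(): the old calls often passed an error-code constant that was not the real errno at all. A minimal sketch of what such a helper can look like, assuming only the standard library (the actual ClickHouse helper differs in detail):

#include <cerrno>
#include <string>
#include <system_error>

// Hypothetical stand-in for the zero-argument errnoToString() used above:
// capture errno once, then render it via std::system_category(), which is
// thread-safe, unlike plain strerror().
std::string errnoToStringSketch(int code = errno)
{
    return "errno: " + std::to_string(code)
        + ", strerror: " + std::system_category().message(code);
}

Defaulting the argument to errno keeps call sites short while still allowing an explicitly saved code to be passed.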
diff --git a/src/Common/StatusFile.cpp b/src/Common/StatusFile.cpp index cdeeeaccf03..ab8ec23e79b 100644 --- a/src/Common/StatusFile.cpp +++ b/src/Common/StatusFile.cpp @@ -23,7 +23,6 @@ namespace DB namespace ErrorCodes { extern const int CANNOT_OPEN_FILE; - extern const int CANNOT_CLOSE_FILE; extern const int CANNOT_TRUNCATE_FILE; extern const int CANNOT_SEEK_THROUGH_FILE; } @@ -98,10 +97,10 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_) StatusFile::~StatusFile() { if (0 != close(fd)) - LOG_ERROR(&Poco::Logger::get("StatusFile"), "Cannot close file {}, {}", path, errnoToString(ErrorCodes::CANNOT_CLOSE_FILE)); + LOG_ERROR(&Poco::Logger::get("StatusFile"), "Cannot close file {}, {}", path, errnoToString()); if (0 != unlink(path.c_str())) - LOG_ERROR(&Poco::Logger::get("StatusFile"), "Cannot unlink file {}, {}", path, errnoToString(ErrorCodes::CANNOT_CLOSE_FILE)); + LOG_ERROR(&Poco::Logger::get("StatusFile"), "Cannot unlink file {}, {}", path, errnoToString()); } } diff --git a/src/Common/ThreadFuzzer.cpp b/src/Common/ThreadFuzzer.cpp index 962cfee074d..cbfbfc5d690 100644 --- a/src/Common/ThreadFuzzer.cpp +++ b/src/Common/ThreadFuzzer.cpp @@ -82,7 +82,7 @@ ThreadFuzzer::ThreadFuzzer() template static void initFromEnv(T & what, const char * name) { - const char * env = getenv(name); + const char * env = getenv(name); // NOLINT(concurrency-mt-unsafe) if (!env) return; what = parse(env); @@ -91,7 +91,7 @@ static void initFromEnv(T & what, const char * name) template static void initFromEnv(std::atomic & what, const char * name) { - const char * env = getenv(name); + const char * env = getenv(name); // NOLINT(concurrency-mt-unsafe) if (!env) return; what.store(parse(env), std::memory_order_relaxed); diff --git a/src/Common/ThreadProfileEvents.cpp b/src/Common/ThreadProfileEvents.cpp index 09517b148f9..dba65a138c3 100644 --- a/src/Common/ThreadProfileEvents.cpp +++ b/src/Common/ThreadProfileEvents.cpp @@ -301,7 +301,7 @@ static void enablePerfEvent(int event_fd) { LOG_WARNING(&Poco::Logger::get("PerfEvents"), "Can't enable perf event with file descriptor {}: '{}' ({})", - event_fd, errnoToString(errno), errno); + event_fd, errnoToString(), errno); } } @@ -311,7 +311,7 @@ static void disablePerfEvent(int event_fd) { LOG_WARNING(&Poco::Logger::get("PerfEvents"), "Can't disable perf event with file descriptor {}: '{}' ({})", - event_fd, errnoToString(errno), errno); + event_fd, errnoToString(), errno); } } @@ -321,7 +321,7 @@ static void releasePerfEvent(int event_fd) { LOG_WARNING(&Poco::Logger::get("PerfEvents"), "Can't close perf event file descriptor {}: {} ({})", - event_fd, errnoToString(errno), errno); + event_fd, errnoToString(), errno); } } @@ -339,7 +339,7 @@ static bool validatePerfEventDescriptor(int & fd) { LOG_WARNING(&Poco::Logger::get("PerfEvents"), "Error while checking availability of event descriptor {}: {} ({})", - fd, errnoToString(errno), errno); + fd, errnoToString(), errno); disablePerfEvent(fd); releasePerfEvent(fd); @@ -446,7 +446,7 @@ bool PerfEventsCounters::processThreadLocalChanges(const std::string & needed_ev LOG_WARNING(&Poco::Logger::get("PerfEvents"), "Failed to open perf event {} (event_type={}, event_config={}): " "'{}' ({})", event_info.settings_name, event_info.event_type, - event_info.event_config, errnoToString(errno), errno); + event_info.event_config, errnoToString(), errno); } } @@ -532,7 +532,7 @@ void PerfEventsCounters::finalizeProfileEvents(ProfileEvents::Counters & profile { LOG_WARNING(&Poco::Logger::get("PerfEvents"), "Can't read 
event value from file descriptor {}: '{}' ({})", - fd, errnoToString(errno), errno); + fd, errnoToString(), errno); current_values[i] = {}; } } diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index 423a44c97d6..0cbc6f4ce0f 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -120,7 +120,7 @@ ThreadStatus::ThreadStatus() if (0 != sigaltstack(&altstack_description, nullptr)) { - LOG_WARNING(log, "Cannot set alternative signal stack for thread, {}", errnoToString(errno)); + LOG_WARNING(log, "Cannot set alternative signal stack for thread, {}", errnoToString()); } else { @@ -128,7 +128,7 @@ ThreadStatus::ThreadStatus() struct sigaction action{}; if (0 != sigaction(SIGSEGV, nullptr, &action)) { - LOG_WARNING(log, "Cannot obtain previous signal action to set alternative signal stack for thread, {}", errnoToString(errno)); + LOG_WARNING(log, "Cannot obtain previous signal action to set alternative signal stack for thread, {}", errnoToString()); } else if (!(action.sa_flags & SA_ONSTACK)) { @@ -136,7 +136,7 @@ ThreadStatus::ThreadStatus() if (0 != sigaction(SIGSEGV, &action, nullptr)) { - LOG_WARNING(log, "Cannot set action with alternative signal stack for thread, {}", errnoToString(errno)); + LOG_WARNING(log, "Cannot set action with alternative signal stack for thread, {}", errnoToString()); } } } diff --git a/src/Common/ZooKeeper/examples/CMakeLists.txt b/src/Common/ZooKeeper/examples/CMakeLists.txt index 7377cc956a9..e8932fd3088 100644 --- a/src/Common/ZooKeeper/examples/CMakeLists.txt +++ b/src/Common/ZooKeeper/examples/CMakeLists.txt @@ -6,6 +6,3 @@ target_link_libraries(zkutil_test_commands_new_lib PRIVATE clickhouse_common_zoo clickhouse_add_executable(zkutil_test_async zkutil_test_async.cpp) target_link_libraries(zkutil_test_async PRIVATE clickhouse_common_zookeeper_no_log) - -clickhouse_add_executable (zookeeper_impl zookeeper_impl.cpp) -target_link_libraries (zookeeper_impl PRIVATE clickhouse_common_zookeeper_no_log) diff --git a/src/Common/ZooKeeper/examples/zookeeper_impl.cpp b/src/Common/ZooKeeper/examples/zookeeper_impl.cpp deleted file mode 100644 index 7754e239132..00000000000 --- a/src/Common/ZooKeeper/examples/zookeeper_impl.cpp +++ /dev/null @@ -1,27 +0,0 @@ -#include -#include - - -int main() -try -{ - zkutil::ZooKeeperArgs args; - Coordination::ZooKeeper zookeeper({Coordination::ZooKeeper::Node{Poco::Net::SocketAddress{"localhost:2181"}, false}}, args, nullptr); - - zookeeper.create("/test", "hello", false, false, {}, [](const Coordination::CreateResponse & response) - { - if (response.error != Coordination::Error::ZOK) - std::cerr << "Error: " << Coordination::errorMessage(response.error) << "\n"; - else - std::cerr << "Path created: " << response.path_created << "\n"; - }); - - sleep(100); - - return 0; -} -catch (...) 
-{ - DB::tryLogCurrentException(__PRETTY_FUNCTION__); - return 1; -} diff --git a/src/Common/base58.h b/src/Common/base58.h index 3d4b55a1fba..bc3c3c7aee8 100644 --- a/src/Common/base58.h +++ b/src/Common/base58.h @@ -5,12 +5,22 @@ namespace DB { -inline size_t encodeBase58(const char8_t * src, char8_t * dst) +inline size_t encodeBase58(const char8_t * src, size_t srclen, char8_t * dst) { const char * base58_encoding_alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"; + size_t processed = 0; + size_t zeros = 0; + for (;*src == '\0' && processed < srclen-1; ++src) + { + ++processed; + ++zeros; + *dst++ = '1'; + } + size_t idx = 0; - for (; *src; ++src) + + while (processed < srclen-1) { unsigned int carry = static_cast(*src); for (size_t j = 0; j < idx; ++j) @@ -24,6 +34,8 @@ inline size_t encodeBase58(const char8_t * src, char8_t * dst) dst[idx++] = static_cast(carry % 58); carry /= 58; } + ++src; + ++processed; } size_t c_idx = idx >> 1; @@ -37,23 +49,38 @@ inline size_t encodeBase58(const char8_t * src, char8_t * dst) { dst[c_idx] = base58_encoding_alphabet[static_cast(dst[c_idx])]; } + dst[idx] = '\0'; - return idx + 1; + return zeros + idx + 1; } -inline size_t decodeBase58(const char8_t * src, char8_t * dst) +inline size_t decodeBase58(const char8_t * src, size_t srclen, char8_t * dst) { const signed char uint_max = UINT_MAX; const signed char map_digits[128] = {uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, - uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, 0, 1, 2, 3, 4, 5, 6, 7, 8, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, 9, 10, 11, 12, 13, 14, 15, 16, uint_max, 17, 18, 19, 20, 21, uint_max, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, uint_max, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, uint_max, uint_max, uint_max, uint_max, uint_max}; + uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, 0, 1, 2, + 3, 4, 5, 6, 7, 8, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, + 9, 10, 11, 12, 13, 14, 15, 16, uint_max, 17, 18, 19, 20, + 21, uint_max, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + uint_max, uint_max, uint_max, uint_max, uint_max, uint_max, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, uint_max, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, uint_max, uint_max, uint_max, uint_max, uint_max}; + + size_t processed = 0; + size_t zeros = 0; + for (;*src == '1' && processed < srclen-1; ++src) + { + ++processed; + ++zeros; + *dst++ = '\0'; + } size_t idx = 0; - for (; *src; ++src) + while (processed < srclen-1) { unsigned int carry = map_digits[*src]; if (unlikely(carry == UINT_MAX)) @@ -71,6 +98,8 @@ inline size_t decodeBase58(const char8_t * src, char8_t * dst) dst[idx++] = static_cast(carry & 0xff); carry >>= 8; } + ++src; + ++processed; } size_t c_idx = idx >> 1; @@ -81,7 +110,7 @@ inline size_t decodeBase58(const char8_t * src, char8_t * dst) dst[idx - (i + 1)] = s; } dst[idx] = '\0'; - return idx + 1; + return zeros + idx + 1; } } diff --git 
a/src/Common/examples/arena_with_free_lists.cpp b/src/Common/examples/arena_with_free_lists.cpp index 5997a9ff4b0..947519ad1cd 100644 --- a/src/Common/examples/arena_with_free_lists.cpp +++ b/src/Common/examples/arena_with_free_lists.cpp @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include #include @@ -218,6 +220,7 @@ int main(int argc, char ** argv) } std::cerr << std::fixed << std::setprecision(2); + pcg64 rng(randomSeed()); size_t n = parse(argv[1]); std::vector data; @@ -281,8 +284,8 @@ int main(int argc, char ** argv) size_t bytes = 0; for (size_t i = 0, size = data.size(); i < size; ++i) { - size_t index_from = lrand48() % size; - size_t index_to = lrand48() % size; + size_t index_from = rng() % size; + size_t index_to = rng() % size; arena.free(const_cast(refs[index_to].data), refs[index_to].size); const auto & s = data[index_from]; @@ -318,8 +321,8 @@ int main(int argc, char ** argv) size_t bytes = 0; for (size_t i = 0, size = data.size(); i < size; ++i) { - size_t index_from = lrand48() % size; - size_t index_to = lrand48() % cache_size; + size_t index_from = rng() % size; + size_t index_to = rng() % cache_size; dictionary.setAttributeValue(attr, index_to, data[index_from]); diff --git a/src/Common/examples/int_hashes_perf.cpp b/src/Common/examples/int_hashes_perf.cpp index bea180f1acd..fecfa0adba8 100644 --- a/src/Common/examples/int_hashes_perf.cpp +++ b/src/Common/examples/int_hashes_perf.cpp @@ -8,9 +8,11 @@ #include #include +#include #include #include #include +#include #include @@ -266,9 +268,9 @@ int main(int argc, char ** argv) { Stopwatch watch; - srand48(rdtsc()); + pcg64 rng(randomSeed()); for (size_t i = 0; i < BUF_SIZE; ++i) - data[i] = lrand48(); + data[i] = rng(); watch.stop(); double elapsed = watch.elapsedSeconds(); diff --git a/src/Common/filesystemHelpers.cpp b/src/Common/filesystemHelpers.cpp index 610608cd312..4c60a6ddac0 100644 --- a/src/Common/filesystemHelpers.cpp +++ b/src/Common/filesystemHelpers.cpp @@ -16,9 +16,17 @@ #include #include #include +#include +#include namespace fs = std::filesystem; + +namespace ProfileEvents +{ + extern const Event ExternalProcessingFilesTotal; +} + namespace DB { @@ -34,7 +42,6 @@ namespace ErrorCodes extern const int CANNOT_CREATE_FILE; } - struct statvfs getStatVFS(const String & path) { struct statvfs fs; @@ -47,18 +54,20 @@ struct statvfs getStatVFS(const String & path) return fs; } - -bool enoughSpaceInDirectory(const std::string & path [[maybe_unused]], size_t data_size [[maybe_unused]]) +bool enoughSpaceInDirectory(const std::string & path, size_t data_size) { - auto free_space = fs::space(path).free; + fs::path filepath(path); + /// `path` may point to nonexisting file, then we can't check it directly, move to parent directory + while (filepath.has_parent_path() && !fs::exists(filepath)) + filepath = filepath.parent_path(); + auto free_space = fs::space(filepath).free; return data_size <= free_space; } std::unique_ptr createTemporaryFile(const std::string & path) { + ProfileEvents::increment(ProfileEvents::ExternalProcessingFilesTotal); fs::create_directories(path); - - /// NOTE: std::make_shared cannot use protected constructors return std::make_unique(path); } @@ -79,6 +88,22 @@ String getBlockDeviceId([[maybe_unused]] const String & path) #endif } + +std::optional tryGetBlockDeviceId([[maybe_unused]] const String & path) +{ +#if defined(OS_LINUX) + struct stat sb; + if (lstat(path.c_str(), &sb)) + return {}; + WriteBufferFromOwnString ss; + ss << major(sb.st_dev) << ":" << minor(sb.st_dev); 
+ return ss.str(); +#else + return {}; +#endif + +} + #if !defined(OS_LINUX) [[noreturn]] #endif diff --git a/src/Common/filesystemHelpers.h b/src/Common/filesystemHelpers.h index f96fe269eab..9faaabb42f2 100644 --- a/src/Common/filesystemHelpers.h +++ b/src/Common/filesystemHelpers.h @@ -19,12 +19,15 @@ using TemporaryFile = Poco::TemporaryFile; bool enoughSpaceInDirectory(const std::string & path, size_t data_size); std::unique_ptr createTemporaryFile(const std::string & path); + // Determine what block device is responsible for specified path #if !defined(OS_LINUX) [[noreturn]] #endif String getBlockDeviceId([[maybe_unused]] const String & path); +std::optional tryGetBlockDeviceId([[maybe_unused]] const String & path); + enum class BlockDeviceType { UNKNOWN = 0, // we were unable to determine device type diff --git a/src/Common/getRandomASCIIString.cpp b/src/Common/getRandomASCIIString.cpp index 788c0d05ff5..594b4cd3228 100644 --- a/src/Common/getRandomASCIIString.cpp +++ b/src/Common/getRandomASCIIString.cpp @@ -5,10 +5,11 @@ namespace DB { -String getRandomASCIIString(size_t len, char first, char last) +String getRandomASCIIString(size_t length) { - std::uniform_int_distribution distribution(first, last); - String res(len, ' '); + std::uniform_int_distribution distribution('a', 'z'); + String res; + res.resize(length); for (auto & c : res) c = distribution(thread_local_rng); return res; diff --git a/src/Common/getRandomASCIIString.h b/src/Common/getRandomASCIIString.h index 69684a9bef2..627d2700ce3 100644 --- a/src/Common/getRandomASCIIString.h +++ b/src/Common/getRandomASCIIString.h @@ -1,10 +1,12 @@ #pragma once + #include + namespace DB { -/// Slow random string. Useful for random names and things like this. Not for -/// generating data. -String getRandomASCIIString(size_t len = 32, char first = 'a', char last = 'z'); + +/// Slow random string. Useful for random names and things like this. Not for generating data. 
+String getRandomASCIIString(size_t length); } diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index 8850e33f9c4..0c35510b03e 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -23,6 +23,7 @@ #include #include #include +#include namespace DB { @@ -259,7 +260,7 @@ void KeeperServer::forceRecovery() raft_instance->update_params(params); } -void KeeperServer::launchRaftServer(bool enable_ipv6) +void KeeperServer::launchRaftServer(const Poco::Util::AbstractConfiguration & config, bool enable_ipv6) { nuraft::raft_params params; params.heart_beat_interval_ @@ -311,10 +312,26 @@ void KeeperServer::launchRaftServer(bool enable_ipv6) nuraft::ptr logger = nuraft::cs_new("RaftInstance", coordination_settings->raft_logs_level); asio_service = nuraft::cs_new(asio_opts, logger); - asio_listener = asio_service->create_rpc_listener(state_manager->getPort(), logger, enable_ipv6); - if (!asio_listener) - return; + // we use the same config as for the CH replicas because it is for internal communication between Keeper instances + std::vector listen_hosts = DB::getMultipleValuesFromConfig(config, "", "interserver_listen_host"); + + if (listen_hosts.empty()) + { + auto asio_listener = asio_service->create_rpc_listener(state_manager->getPort(), logger, enable_ipv6); + if (!asio_listener) + return; + asio_listeners.emplace_back(std::move(asio_listener)); + } + else + { + for (const auto & listen_host : listen_hosts) + { + auto asio_listener = asio_service->create_rpc_listener(listen_host, state_manager->getPort(), logger); + if (asio_listener) + asio_listeners.emplace_back(std::move(asio_listener)); + } + } nuraft::ptr scheduler = asio_service; nuraft::ptr rpc_cli_factory = asio_service; @@ -324,17 +341,21 @@ void KeeperServer::launchRaftServer(bool enable_ipv6) /// raft_server creates unique_ptr from it nuraft::context * ctx - = new nuraft::context(casted_state_manager, casted_state_machine, asio_listener, logger, rpc_cli_factory, scheduler, params); + = new nuraft::context(casted_state_manager, casted_state_machine, asio_listeners, logger, rpc_cli_factory, scheduler, params); raft_instance = nuraft::cs_new(ctx, init_options); + if (!raft_instance) + throw Exception(ErrorCodes::RAFT_ERROR, "Cannot allocate RAFT instance"); + raft_instance->start_server(init_options.skip_initial_election_timeout_); nuraft::ptr casted_raft_server = raft_instance; - asio_listener->listen(casted_raft_server); - if (!raft_instance) - throw Exception(ErrorCodes::RAFT_ERROR, "Cannot allocate RAFT instance"); + for (const auto & asio_listener : asio_listeners) + { + asio_listener->listen(casted_raft_server); + } } void KeeperServer::startup(const Poco::Util::AbstractConfiguration & config, bool enable_ipv6) @@ -364,7 +385,7 @@ void KeeperServer::startup(const Poco::Util::AbstractConfiguration & config, boo last_local_config = state_manager->parseServersConfiguration(config, true).cluster_config; - launchRaftServer(enable_ipv6); + launchRaftServer(config, enable_ipv6); keeper_context->server_state = KeeperContext::Phase::RUNNING; } @@ -388,10 +409,13 @@ void KeeperServer::shutdownRaftServer() raft_instance.reset(); - if (asio_listener) + for (const auto & asio_listener : asio_listeners) { - asio_listener->stop(); - asio_listener->shutdown(); + if (asio_listener) + { + asio_listener->stop(); + asio_listener->shutdown(); + } } if (asio_service) diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h index 1fb3e579214..6873ef2a01e 100644 --- 
a/src/Coordination/KeeperServer.h +++ b/src/Coordination/KeeperServer.h @@ -30,7 +30,7 @@ private: struct KeeperRaftServer; nuraft::ptr raft_instance; nuraft::ptr asio_service; - nuraft::ptr asio_listener; + std::vector> asio_listeners; // because some actions can be applied // when we are sure that there are no requests currently being // processed (e.g. recovery) we do all write actions @@ -52,7 +52,7 @@ private: /// Almost copy-paste from nuraft::launcher, but with separated server init and start /// Allows to avoid race conditions. - void launchRaftServer(bool enable_ipv6); + void launchRaftServer(const Poco::Util::AbstractConfiguration & config, bool enable_ipv6); void shutdownRaftServer(); diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp index 90f108dadb6..3d6c80b5e41 100644 --- a/src/Coordination/KeeperStateMachine.cpp +++ b/src/Coordination/KeeperStateMachine.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -11,6 +12,7 @@ #include #include "Coordination/KeeperStorage.h" + namespace ProfileEvents { extern const Event KeeperCommits; @@ -446,7 +448,7 @@ static int bufferFromFile(Poco::Logger * log, const std::string & path, nuraft:: LOG_INFO(log, "Opening file {} for read_logical_snp_obj", path); if (fd < 0) { - LOG_WARNING(log, "Error opening {}, error: {}, errno: {}", path, std::strerror(errno), errno); + LOG_WARNING(log, "Error opening {}, error: {}, errno: {}", path, errnoToString(), errno); return errno; } auto file_size = ::lseek(fd, 0, SEEK_END); @@ -454,7 +456,7 @@ static int bufferFromFile(Poco::Logger * log, const std::string & path, nuraft:: auto * chunk = reinterpret_cast(::mmap(nullptr, file_size, PROT_READ, MAP_FILE | MAP_SHARED, fd, 0)); if (chunk == MAP_FAILED) { - LOG_WARNING(log, "Error mmapping {}, error: {}, errno: {}", path, std::strerror(errno), errno); + LOG_WARNING(log, "Error mmapping {}, error: {}, errno: {}", path, errnoToString(), errno); ::close(fd); return errno; } diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp index 4beb40f6efd..493e76ee5fc 100644 --- a/src/Coordination/tests/gtest_coordination.cpp +++ b/src/Coordination/tests/gtest_coordination.cpp @@ -137,7 +137,7 @@ struct SimpliestRaftServer if (!raft_instance) { std::cerr << "Failed to initialize launcher" << std::endl; - exit(-1); + _exit(1); } std::cout << "init Raft instance " << server_id; diff --git a/src/Core/DecimalFunctions.h b/src/Core/DecimalFunctions.h index 331df9aa637..0f2158fb83b 100644 --- a/src/Core/DecimalFunctions.h +++ b/src/Core/DecimalFunctions.h @@ -90,24 +90,56 @@ struct DataTypeDecimalTrait * Sign of `fractional` is expected to be positive, otherwise result is undefined. * If `scale` is to big (scale > max_precision), result is undefined. */ -template -inline DecimalType decimalFromComponentsWithMultiplier( - const typename DecimalType::NativeType & whole, - const typename DecimalType::NativeType & fractional, - typename DecimalType::NativeType scale_multiplier) + +template +inline bool decimalFromComponentsWithMultiplierImpl( + const typename DecimalType::NativeType & whole, + const typename DecimalType::NativeType & fractional, + typename DecimalType::NativeType scale_multiplier, + DecimalType & result) { using T = typename DecimalType::NativeType; const auto fractional_sign = whole < 0 ? 
-1 : 1; T whole_scaled = 0; if (common::mulOverflow(whole, scale_multiplier, whole_scaled)) - throw Exception("Decimal math overflow", ErrorCodes::DECIMAL_OVERFLOW); + { + if constexpr (throw_on_error) + throw Exception("Decimal math overflow", ErrorCodes::DECIMAL_OVERFLOW); + return false; + } T value; if (common::addOverflow(whole_scaled, fractional_sign * (fractional % scale_multiplier), value)) - throw Exception("Decimal math overflow", ErrorCodes::DECIMAL_OVERFLOW); + { + if constexpr (throw_on_error) + throw Exception("Decimal math overflow", ErrorCodes::DECIMAL_OVERFLOW); + return false; + } - return DecimalType(value); + result = DecimalType(value); + return true; +} + +template +inline DecimalType decimalFromComponentsWithMultiplier( + const typename DecimalType::NativeType & whole, + const typename DecimalType::NativeType & fractional, + typename DecimalType::NativeType scale_multiplier) +{ + DecimalType result; + decimalFromComponentsWithMultiplierImpl(whole, fractional, scale_multiplier, result); + return result; +} + +template +inline bool tryGetDecimalFromComponentsWithMultiplier( + const typename DecimalType::NativeType & whole, + const typename DecimalType::NativeType & fractional, + typename DecimalType::NativeType scale_multiplier, + DecimalType & result) +{ + return decimalFromComponentsWithMultiplierImpl(whole, fractional, scale_multiplier, result); } template @@ -118,6 +150,15 @@ inline DecimalType decimalFromComponentsWithMultiplier( return decimalFromComponentsWithMultiplier(components.whole, components.fractional, scale_multiplier); } +template +inline bool tryGetDecimalFromComponentsWithMultiplier( + const DecimalComponents & components, + typename DecimalType::NativeType scale_multiplier, + DecimalType & result) +{ + return tryGetDecimalFromComponentsWithMultiplier(components.whole, components.fractional, scale_multiplier, result); +} + /** Make a decimal value from whole and fractional components with given scale. * @@ -134,6 +175,18 @@ inline DecimalType decimalFromComponents( return decimalFromComponentsWithMultiplier(whole, fractional, scaleMultiplier(scale)); } +template +inline bool tryGetDecimalFromComponents( + const typename DecimalType::NativeType & whole, + const typename DecimalType::NativeType & fractional, + UInt32 scale, + DecimalType & result) +{ + using T = typename DecimalType::NativeType; + + return tryGetDecimalFromComponentsWithMultiplier(whole, fractional, scaleMultiplier(scale), result); +} + /** Make a decimal value from whole and fractional components with given scale. * @see `decimalFromComponentsWithMultiplier` for details. */ @@ -145,6 +198,15 @@ inline DecimalType decimalFromComponents( return decimalFromComponents(components.whole, components.fractional, scale); } +template +inline bool tryGetDecimalFromComponents( + const DecimalComponents & components, + UInt32 scale, + DecimalType & result) +{ + return tryGetDecimalFromComponents(components.whole, components.fractional, scale, result); +} + /** Split decimal into whole and fractional parts with given scale_multiplier. * This is an optimization to reduce number of calls to scaleMultiplier on known scale. 
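*/

The DecimalFunctions.h chunk above refactors decimalFromComponentsWithMultiplier into a single Impl that reports overflow through its return value, with a throw_on_error template flag selecting between the throwing wrapper and the try-style wrapper. A minimal sketch of the same pattern on plain integers; the names are illustrative, and __builtin_add_overflow assumes GCC or Clang:

#include <stdexcept>

// One Impl reports failure via bool; if constexpr picks the throwing path at
// compile time, so the non-throwing wrapper pays no exception overhead.
template <bool throw_on_error>
inline bool addImpl(long a, long b, long & result)
{
    if (__builtin_add_overflow(a, b, &result))
    {
        if constexpr (throw_on_error)
            throw std::overflow_error("math overflow");
        return false;
    }
    return true;
}

inline long addOrThrow(long a, long b)
{
    long result = 0;
    addImpl<true>(a, b, result);
    return result;
}

inline bool tryAdd(long a, long b, long & result)
{
    return addImpl<false>(a, b, result);
}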
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 75062ce9c94..af32c15a867 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -64,8 +64,8 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Seconds, connect_timeout, DBMS_DEFAULT_CONNECT_TIMEOUT_SEC, "Connection timeout if there are no replicas.", 0) \
M(Milliseconds, connect_timeout_with_failover_ms, 50, "Connection timeout for selecting first healthy replica.", 0) \
M(Milliseconds, connect_timeout_with_failover_secure_ms, 100, "Connection timeout for selecting first healthy replica (for secure connections).", 0) \
- M(Seconds, receive_timeout, DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC, "", 0) \
- M(Seconds, send_timeout, DBMS_DEFAULT_SEND_TIMEOUT_SEC, "", 0) \
+ M(Seconds, receive_timeout, DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC, "Timeout for receiving data from the network, in seconds. If no bytes were received in this interval, an exception is thrown. If you set this setting on the client, the 'send_timeout' for the socket will also be set on the corresponding connection end on the server.", 0) \
+ M(Seconds, send_timeout, DBMS_DEFAULT_SEND_TIMEOUT_SEC, "Timeout for sending data to the network, in seconds. If the client needs to send some data but is not able to send any bytes in this interval, an exception is thrown. If you set this setting on the client, the 'receive_timeout' for the socket will also be set on the corresponding connection end on the server.", 0) \
M(Seconds, drain_timeout, 3, "Timeout for draining remote connections, -1 means synchronous drain without ignoring errors", 0) \
M(Seconds, tcp_keep_alive_timeout, 290 /* less than DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC */, "The time in seconds the connection needs to remain idle before TCP starts sending keepalive probes", 0) \
M(Milliseconds, hedged_connection_timeout_ms, 100, "Connection timeout for establishing connection with replica for Hedged requests", 0) \
@@ -90,6 +90,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(UInt64, s3_max_connections, 1024, "The maximum number of connections per server.", 0) \
M(Bool, s3_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables.", 0) \
M(Bool, s3_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in s3 engine tables", 0) \
+ M(Bool, s3_check_objects_after_upload, false, "Check each object uploaded to S3 with a HEAD request to be sure that the upload was successful", 0) \
M(Bool, enable_s3_requests_logging, false, "Enable very explicit logging of S3 requests. Makes sense for debug only.", 0) \
M(UInt64, hdfs_replication, 0, "The actual number of replications can be specified when the hdfs file is created.", 0) \
M(Bool, hdfs_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables", 0) \
@@ -139,8 +140,8 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, group_by_use_nulls, false, "Treat columns mentioned in ROLLUP, CUBE or GROUPING SETS as Nullable", 0) \
\
M(UInt64, max_parallel_replicas, 1, "The maximum number of replicas of each shard used when the query is executed. For consistency (to get different parts of the same partition), this option only works for the specified sampling key. The lag of the replicas is not controlled.", 0) \
- M(UInt64, parallel_replicas_count, 0, "", 0) \
- M(UInt64, parallel_replica_offset, 0, "", 0) \
+ M(UInt64, parallel_replicas_count, 0, "This is an internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting will be automatically set by the initiator server for distributed queries to the number of parallel replicas participating in query processing.", 0) \
+ M(UInt64, parallel_replica_offset, 0, "This is an internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting will be automatically set by the initiator server for distributed queries to the index of the replica participating in query processing among parallel replicas.", 0) \
\
M(Bool, allow_experimental_parallel_reading_from_replicas, false, "If true, ClickHouse will send a SELECT query to all replicas of a table. It will work for any kind of MergeTree table.", 0) \
\
@@ -213,7 +214,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, insert_deduplicate, true, "For INSERT queries in the replicated table, specifies that deduplication of inserting blocks should be performed", 0) \
\
M(UInt64Auto, insert_quorum, 0, "For INSERT queries in the replicated table, wait writing for the specified number of replicas and linearize the addition of the data. 0 - disabled.", 0) \
- M(Milliseconds, insert_quorum_timeout, 600000, "", 0) \
+ M(Milliseconds, insert_quorum_timeout, 600000, "If the quorum of replicas was not reached within the specified time (in milliseconds), an exception is thrown and the insertion is aborted.", 0) \
M(Bool, insert_quorum_parallel, true, "For quorum INSERT queries - enable to make parallel inserts without linearizability", 0) \
M(UInt64, select_sequential_consistency, 0, "For SELECT queries from the replicated table, throw an exception if the replica does not have a chunk written with the quorum; do not read the parts that have not yet been written with the quorum.", 0) \
M(UInt64, table_function_remote_max_addresses, 1000, "The maximum number of different shards and the maximum number of replicas of one shard in the `remote` function.", 0) \
@@ -250,7 +251,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(JoinStrictness, join_default_strictness, JoinStrictness::All, "Set default strictness in JOIN query. Possible values: empty string, 'ANY', 'ALL'. If empty, query without strictness will throw exception.", 0) \
M(Bool, any_join_distinct_right_table_keys, false, "Enable old ANY JOIN logic with many-to-one left-to-right table keys mapping for all ANY JOINs. It leads to confusing not equal results for 't1 ANY LEFT JOIN t2' and 't2 ANY RIGHT JOIN t1'. ANY RIGHT JOIN needs one-to-many keys mapping to be consistent with LEFT one.", IMPORTANT) \
\
- M(UInt64, preferred_block_size_bytes, 1000000, "", 0) \
+ M(UInt64, preferred_block_size_bytes, 1000000, "This setting adjusts the data block size for query processing and provides additional fine-tuning on top of the rougher 'max_block_size' setting. If the columns are large and with 'max_block_size' rows the block size is likely to be larger than the specified amount of bytes, its size will be lowered for better CPU cache locality.", 0) \
\
M(UInt64, max_replica_delay_for_distributed_queries, 300, "If set, distributed queries of Replicated tables will choose servers with replication delay in seconds less than the specified value (not inclusive). Zero means do not take delay into account.", 0) \
M(Bool, fallback_to_stale_replicas_for_distributed_queries, true, "Suppose max_replica_delay_for_distributed_queries is set and all replicas for the queried table are stale. If this setting is enabled, the query will be performed anyway, otherwise the error will be reported.", 0) \
@@ -280,6 +281,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(UInt64, http_max_fields, 1000000, "Maximum number of fields in HTTP header", 0) \
M(UInt64, http_max_field_name_size, 1048576, "Maximum length of field name in HTTP header", 0) \
M(UInt64, http_max_field_value_size, 1048576, "Maximum length of field value in HTTP header", 0) \
+ M(UInt64, http_max_chunk_size, 100_GiB, "Maximum value of a chunk size in HTTP chunked transfer encoding", 0) \
M(Bool, http_skip_not_found_url_for_globs, true, "Skip url's for globs with HTTP_NOT_FOUND error", 0) \
M(Bool, optimize_throw_if_noop, false, "If setting is enabled and OPTIMIZE query didn't actually assign a merge then an explanatory exception is thrown", 0) \
M(Bool, use_index_for_in_with_subqueries, true, "Try using an index if there is a subquery or a table expression on the right side of the IN operator.", 0) \
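The preferred_block_size_bytes description above boils down to simple arithmetic: the number of rows per block is capped so that the estimated block size in bytes stays near the preferred value. A back-of-the-envelope sketch with illustrative names, not the actual adaptive logic:

#include <algorithm>
#include <cstddef>

// Cap the row count of a block so that avg_row_bytes * rows stays near
// preferred_block_size_bytes. Assumes max_block_size >= 1.
size_t rowsPerBlock(size_t max_block_size, size_t preferred_block_size_bytes, size_t avg_row_bytes)
{
    if (avg_row_bytes == 0)
        return max_block_size; /// no size estimate yet, fall back to the row limit
    size_t rows_by_bytes = preferred_block_size_bytes / avg_row_bytes;
    return std::clamp<size_t>(rows_by_bytes, 1, max_block_size);
}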
@@ -314,14 +316,14 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(UInt64, max_bytes_to_read_leaf, 0, "Limit on read bytes (after decompression) on the leaf nodes for distributed queries. Limit is applied for local reads only, excluding the final merge stage on the root node.", 0) \
M(OverflowMode, read_overflow_mode_leaf, OverflowMode::THROW, "What to do when the leaf limit is exceeded.", 0) \
\
- M(UInt64, max_rows_to_group_by, 0, "", 0) \
+ M(UInt64, max_rows_to_group_by, 0, "If aggregation during GROUP BY is generating more than the specified number of rows (unique GROUP BY keys), the behavior will be determined by the 'group_by_overflow_mode', which by default is - throw an exception, but it can also be switched to an approximate GROUP BY mode.", 0) \
M(OverflowModeGroupBy, group_by_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \
- M(UInt64, max_bytes_before_external_group_by, 0, "", 0) \
+ M(UInt64, max_bytes_before_external_group_by, 0, "If memory usage during GROUP BY operation exceeds this threshold in bytes, activate the 'external aggregation' mode (spill data to disk). Recommended value is half of available system memory.", 0) \
\
- M(UInt64, max_rows_to_sort, 0, "", 0) \
- M(UInt64, max_bytes_to_sort, 0, "", 0) \
+ M(UInt64, max_rows_to_sort, 0, "If more than the specified number of records have to be processed for ORDER BY operation, the behavior will be determined by the 'sort_overflow_mode', which by default is - throw an exception", 0) \
+ M(UInt64, max_bytes_to_sort, 0, "If more than the specified amount of (uncompressed) bytes have to be processed for ORDER BY operation, the behavior will be determined by the 'sort_overflow_mode', which by default is - throw an exception", 0) \
M(OverflowMode, sort_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \
- M(UInt64, max_bytes_before_external_sort, 0, "", 0) \
+ M(UInt64, max_bytes_before_external_sort, 0, "If memory usage during ORDER BY operation exceeds this threshold in bytes, activate the 'external sorting' mode (spill data to disk).
Recommended value is half of available system memory.", 0) \
M(UInt64, max_bytes_before_remerge_sort, 1000000000, "In case of ORDER BY with LIMIT, when memory usage is higher than specified threshold, perform additional steps of merging blocks before final merge to keep just top LIMIT rows.", 0) \
M(Float, remerge_sort_lowered_memory_bytes_ratio, 2., "If memory usage after remerge is not reduced by this ratio, remerge will be disabled.", 0) \
\
@@ -330,7 +332,7 @@
M(OverflowMode, result_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \
\
/* TODO: Check also when merging and finalizing aggregate functions. */ \
- M(Seconds, max_execution_time, 0, "", 0) \
+ M(Seconds, max_execution_time, 0, "If the query run time exceeds the specified number of seconds, the behavior will be determined by the 'timeout_overflow_mode', which by default is - throw an exception. Note that the timeout is checked and the query can stop only in designated places during data processing. It currently cannot stop during merging of aggregation states or during query analysis, and the actual run time will be higher than the value of this setting.", 0) \
M(OverflowMode, timeout_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \
\
M(UInt64, min_execution_speed, 0, "Minimum number of execution rows per second.", 0) \
@@ -339,12 +341,12 @@
M(UInt64, max_execution_speed_bytes, 0, "Maximum number of execution bytes per second.", 0) \
M(Seconds, timeout_before_checking_execution_speed, 10, "Check that the speed is not too low after the specified time has elapsed.", 0) \
\
- M(UInt64, max_columns_to_read, 0, "", 0) \
- M(UInt64, max_temporary_columns, 0, "", 0) \
- M(UInt64, max_temporary_non_const_columns, 0, "", 0) \
+ M(UInt64, max_columns_to_read, 0, "If a query requires reading more than the specified number of columns, an exception is thrown. Zero value means unlimited. This setting is useful to prevent too complex queries.", 0) \
+ M(UInt64, max_temporary_columns, 0, "If a query generates more than the specified number of temporary columns in memory as a result of intermediate calculation, an exception is thrown. Zero value means unlimited. This setting is useful to prevent too complex queries.", 0) \
+ M(UInt64, max_temporary_non_const_columns, 0, "Similar to the 'max_temporary_columns' setting but applies only to non-constant columns. This makes sense, because constant columns are cheap and it is reasonable to allow more of them.", 0) \
\
- M(UInt64, max_subquery_depth, 100, "", 0) \
- M(UInt64, max_pipeline_depth, 1000, "", 0) \
+ M(UInt64, max_subquery_depth, 100, "If a query has more than the specified number of nested subqueries, throw an exception. This allows you to have a sanity check to protect the users of your cluster from going insane with their queries.", 0) \
+ M(UInt64, max_pipeline_depth, 1000, "If a query has more than the specified number of stages in the query pipeline, throw an exception. The pipeline has stages for every relational operator. This allows limiting the complexity of the queries.", 0) \
M(UInt64, max_ast_depth, 1000, "Maximum depth of query syntax tree. Checked after parsing.", 0) \
M(UInt64, max_ast_elements, 50000, "Maximum size of query syntax tree in number of nodes.
Checked after parsing.", 0) \ M(UInt64, max_expanded_ast_elements, 500000, "Maximum size of query syntax tree in number of nodes after expansion of aliases and the asterisk.", 0) \ @@ -504,7 +506,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(Bool, alter_partition_verbose_result, false, "Output information about affected parts. Currently works only for FREEZE and ATTACH commands.", 0) \ M(Bool, allow_experimental_database_materialized_mysql, false, "Allow to create database with Engine=MaterializedMySQL(...).", 0) \ M(Bool, allow_experimental_database_materialized_postgresql, false, "Allow to create database with Engine=MaterializedPostgreSQL(...).", 0) \ - M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \ + M(Bool, system_events_show_zero_values, false, "When querying system.events or system.metrics tables, include all metrics, even with zero values.", 0) \ M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal', 'datetime64', 'date2Date32' or 'date2String'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \ M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \ M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \ @@ -530,6 +532,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(UInt64, max_entries_for_hash_table_stats, 10'000, "How many entries hash table statistics collected during aggregation is allowed to have", 0) \ M(UInt64, max_size_to_preallocate_for_aggregation, 10'000'000, "For how many elements it is allowed to preallocate space in all hash tables in total before aggregation", 0) \ \ + M(Bool, kafka_disable_num_consumers_limit, false, "Disable limit on kafka_num_consumers that depends on the number of available CPU cores", 0) \ /** Experimental feature for moving data between shards. */ \ \ M(Bool, allow_experimental_query_deduplication, false, "Experimental data deduplication for SELECT queries based on part UUIDs", 0) \ @@ -588,10 +591,9 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(UInt64, remote_fs_read_max_backoff_ms, 10000, "Max wait time when trying to read data for remote disk", 0) \ M(UInt64, remote_fs_read_backoff_max_tries, 5, "Max attempts to read with backoff", 0) \ M(Bool, enable_filesystem_cache, true, "Use cache for remote filesystem. This setting does not turn on/off cache for disks (must be done via disk config), but allows to bypass cache for some queries if intended", 0) \ - M(UInt64, filesystem_cache_max_wait_sec, 5, "Allow to wait at most this number of seconds for download of current remote_fs_buffer_size bytes, and skip cache if exceeded", 0) \ M(Bool, enable_filesystem_cache_on_write_operations, false, "Write into cache on write operations. 
To actually work, this setting also requires the cache to be enabled in the disk config.", 0) \
M(Bool, enable_filesystem_cache_log, false, "Allows to record the filesystem caching log for each query", 0) \
- M(Bool, read_from_filesystem_cache_if_exists_otherwise_bypass_cache, false, "", 0) \
+ M(Bool, read_from_filesystem_cache_if_exists_otherwise_bypass_cache, false, "Allow to use the filesystem cache in passive mode - benefit from the existing cache entries, but don't put more entries into the cache. If you set this setting for heavy ad-hoc queries and leave it disabled for short real-time queries, this will allow you to avoid cache thrashing by too heavy queries and to improve the overall system efficiency.", 0) \
M(Bool, enable_filesystem_cache_on_lower_level, true, "If read buffer supports caching inside threadpool, allow it to do it, otherwise cache outside of threadpool. Do not use this setting, it is needed for testing", 0) \
M(Bool, skip_download_if_exceeds_query_cache, true, "Skip download from remote filesystem if exceeds query cache size", 0) \
M(UInt64, max_query_cache_size, (128UL * 1024 * 1024 * 1024), "Max remote filesystem cache size that can be used by a single query", 0) \
diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp
index c89f55092f8..19c61e434ae 100644
--- a/src/Daemon/BaseDaemon.cpp
+++ b/src/Daemon/BaseDaemon.cpp
@@ -4,12 +4,14 @@
#include #include +#include #include #include #include #include #include + #if defined(OS_LINUX) #include #endif
@@ -315,13 +317,13 @@ private:
{
LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (no query) Received signal {} ({})", VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info,
- thread_num, strsignal(sig), sig);
+ thread_num, strsignal(sig), sig); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
}
else
{
LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (query_id: {}) (query: {}) Received signal {} ({})", VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info,
- thread_num, query_id, query, strsignal(sig), sig);
+ thread_num, query_id, query, strsignal(sig), sig); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
}
String error_message;
@@ -665,7 +667,7 @@ void BaseDaemon::initialize(Application & self)
if (config().has("timezone"))
{
const std::string config_timezone = config().getString("timezone");
- if (0 != setenv("TZ", config_timezone.data(), 1))
+ if (0 != setenv("TZ", config_timezone.data(), 1)) // NOLINT(concurrency-mt-unsafe) // ok if not called concurrently with other setenv/getenv
throw Poco::Exception("Cannot setenv TZ variable");
tzset();
@@ -940,13 +942,13 @@ void BaseDaemon::handleSignal(int signal_id)
onInterruptSignals(signal_id);
}
else
- throw DB::Exception(std::string("Unsupported signal: ") + strsignal(signal_id), 0);
+ throw DB::Exception(std::string("Unsupported signal: ") + strsignal(signal_id), 0); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
}
void BaseDaemon::onInterruptSignals(int signal_id)
{
is_cancelled = true;
- LOG_INFO(&logger(), "Received termination signal ({})", strsignal(signal_id));
+ LOG_INFO(&logger(), "Received termination signal ({})", strsignal(signal_id)); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
if (sigint_signals_counter >= 2)
{
@@ -1064,7 +1066,7 @@ void BaseDaemon::setupWatchdog()
break;
}
else if (errno != EINTR)
- throw Poco::Exception("Cannot waitpid, errno: " + std::string(strerror(errno)));
+ throw Poco::Exception("Cannot waitpid, errno: " + errnoToString());
} while (true);
if (errno == ECHILD)
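The watchdog change above also shows the classic waitpid retry pattern: EINTR is the only errno worth retrying on. A self-contained sketch of that loop, with standard-library errno formatting standing in for errnoToString() (illustrative function name, not from the ClickHouse tree):

#include <cerrno>
#include <stdexcept>
#include <string>
#include <sys/wait.h>
#include <system_error>

// Wait for a child process, retrying when a signal interrupts the call.
int waitForChildSketch(pid_t child)
{
    int status = 0;
    while (true)
    {
        if (waitpid(child, &status, 0) >= 0)
            return status;
        if (errno != EINTR)
            throw std::runtime_error("Cannot waitpid, errno: " + std::system_category().message(errno));
        /// EINTR: a signal interrupted the call, simply retry.
    }
}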
errno: " + errnoToString()); } while (true); if (errno == ECHILD) diff --git a/src/Daemon/SentryWriter.cpp b/src/Daemon/SentryWriter.cpp index ec19913ff7d..51794bfdc37 100644 --- a/src/Daemon/SentryWriter.cpp +++ b/src/Daemon/SentryWriter.cpp @@ -146,7 +146,7 @@ void SentryWriter::onFault(int sig, const std::string & error_message, const Sta if (initialized) { sentry_value_t event = sentry_value_new_message_event(SENTRY_LEVEL_FATAL, "fault", error_message.c_str()); - sentry_set_tag("signal", strsignal(sig)); + sentry_set_tag("signal", strsignal(sig)); // NOLINT(concurrency-mt-unsafe) // not thread-safe but ok in this context sentry_set_extra("signal_number", sentry_value_new_int32(sig)); #if defined(__ELF__) && !defined(OS_FREEBSD) diff --git a/src/DataTypes/NestedUtils.cpp b/src/DataTypes/NestedUtils.cpp index b28b70f676a..8eb839e2780 100644 --- a/src/DataTypes/NestedUtils.cpp +++ b/src/DataTypes/NestedUtils.cpp @@ -265,6 +265,18 @@ std::unordered_set getAllTableNames(const Block & block, bool to_lower_c } return nested_table_names; } + +Names getAllNestedColumnsForTable(const Block & block, const std::string & table_name) +{ + Names names; + for (const auto & name: block.getNames()) + { + if (extractTableName(name) == table_name) + names.push_back(name); + } + return names; +} + } NestedColumnExtractHelper::NestedColumnExtractHelper(const Block & block_, bool case_insentive_) diff --git a/src/DataTypes/NestedUtils.h b/src/DataTypes/NestedUtils.h index 38da382254c..90fdd683493 100644 --- a/src/DataTypes/NestedUtils.h +++ b/src/DataTypes/NestedUtils.h @@ -34,6 +34,9 @@ namespace Nested /// Get all nested tables names from a block. std::unordered_set getAllTableNames(const Block & block, bool to_lower_case = false); + + /// Extract all column names that are nested for specifying table. + Names getAllNestedColumnsForTable(const Block & block, const std::string & table_name); } /// Use this class to extract element columns from columns of nested type in a block, e.g. named Tuple. 
diff --git a/src/DataTypes/Serializations/SerializationArray.cpp b/src/DataTypes/Serializations/SerializationArray.cpp index 30ee5e98b74..625f2dce0b0 100644 --- a/src/DataTypes/Serializations/SerializationArray.cpp +++ b/src/DataTypes/Serializations/SerializationArray.cpp @@ -535,7 +535,7 @@ void SerializationArray::deserializeTextCSV(IColumn & column, ReadBuffer & istr, readCSV(s, istr, settings.csv); ReadBufferFromString rb(s); - if (settings.csv.input_format_arrays_as_nested_csv) + if (settings.csv.arrays_as_nested_csv) { deserializeTextImpl(column, rb, [&](IColumn & nested_column) diff --git a/src/DataTypes/Serializations/SerializationEnum.cpp b/src/DataTypes/Serializations/SerializationEnum.cpp index a1b9c8bf95a..afa67ba800a 100644 --- a/src/DataTypes/Serializations/SerializationEnum.cpp +++ b/src/DataTypes/Serializations/SerializationEnum.cpp @@ -24,7 +24,7 @@ void SerializationEnum::serializeTextEscaped(const IColumn & column, size_ template void SerializationEnum::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { - if (settings.tsv.input_format_enum_as_number) + if (settings.tsv.enum_as_number) assert_cast(column).getData().push_back(readValue(istr)); else { @@ -52,7 +52,7 @@ void SerializationEnum::deserializeTextQuoted(IColumn & column, ReadBuffer template void SerializationEnum::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { - if (settings.tsv.input_format_enum_as_number) + if (settings.tsv.enum_as_number) { assert_cast(column).getData().push_back(readValue(istr)); if (!istr.eof()) @@ -100,7 +100,7 @@ void SerializationEnum::serializeTextCSV(const IColumn & column, size_t ro template void SerializationEnum::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { - if (settings.csv.input_format_enum_as_number) + if (settings.csv.enum_as_number) assert_cast(column).getData().push_back(readValue(istr)); else { diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp index 63d5af8da3d..a63235b3db0 100644 --- a/src/Databases/DatabaseReplicatedWorker.cpp +++ b/src/Databases/DatabaseReplicatedWorker.cpp @@ -133,10 +133,10 @@ bool DatabaseReplicatedDDLWorker::waitForReplicaToProcessAllEntries(UInt64 timeo return true; auto max_log = DDLTask::getLogEntryName(max_log_ptr); - LOG_TRACE(log, "Waiting for worker thread to process all entries before {}, current task is {}", max_log, current_task); { std::unique_lock lock{mutex}; + LOG_TRACE(log, "Waiting for worker thread to process all entries before {}, current task is {}", max_log, current_task); bool processed = wait_current_task_change.wait_for(lock, std::chrono::milliseconds(timeout_ms), [&]() { return zookeeper->expired() || current_task == max_log || stop_flag; @@ -146,8 +146,6 @@ bool DatabaseReplicatedDDLWorker::waitForReplicaToProcessAllEntries(UInt64 timeo return false; } - LOG_TRACE(log, "Waiting for worker thread to process all entries before {}, current task is {}", max_log, current_task); - /// Lets now wait for max_log_ptr to be processed Coordination::Stat stat; auto event_ptr = std::make_shared(); diff --git a/src/Databases/SQLite/fetchSQLiteTableStructure.cpp b/src/Databases/SQLite/fetchSQLiteTableStructure.cpp index 32341a36f3e..e920ccf3c1c 100644 --- a/src/Databases/SQLite/fetchSQLiteTableStructure.cpp +++ b/src/Databases/SQLite/fetchSQLiteTableStructure.cpp @@ -35,9 +35,9 @@ static DataTypePtr convertSQLiteDataType(String type) res = 
std::make_shared(); else if (type == "smallint") res = std::make_shared(); - else if (type.starts_with("int") || type == "mediumint") + else if ((type.starts_with("int") && type != "int8") || type == "mediumint") res = std::make_shared(); - else if (type == "bigint") + else if (type == "bigint" || type == "int8") res = std::make_shared(); else if (type == "float") res = std::make_shared(); diff --git a/src/Dictionaries/DictionaryStructure.cpp b/src/Dictionaries/DictionaryStructure.cpp index 3ba82164eb2..ec6a56ec2ab 100644 --- a/src/Dictionaries/DictionaryStructure.cpp +++ b/src/Dictionaries/DictionaryStructure.cpp @@ -55,6 +55,7 @@ std::optional tryGetAttributeUnderlyingType(TypeIndex i return magic_enum::enum_cast(static_cast(index)); } + } diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index 2298ab56f68..6bb8b541767 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -72,7 +72,7 @@ public: void sync(int fd) const; String getUniqueId(const String & path) const override { return delegate->getUniqueId(path); } bool checkUniqueId(const String & id) const override { return delegate->checkUniqueId(id); } - DiskType getType() const override { return delegate->getType(); } + DataSourceDescription getDataSourceDescription() const override { return delegate->getDataSourceDescription(); } bool isRemote() const override { return delegate->isRemote(); } bool supportZeroCopyReplication() const override { return delegate->supportZeroCopyReplication(); } bool supportParallelWrite() const override { return delegate->supportParallelWrite(); } diff --git a/src/Disks/DiskEncrypted.h b/src/Disks/DiskEncrypted.h index 1a714395f82..d2795e01086 100644 --- a/src/Disks/DiskEncrypted.h +++ b/src/Disks/DiskEncrypted.h @@ -234,7 +234,13 @@ public: void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap & map) override; - DiskType getType() const override { return DiskType::Encrypted; } + DataSourceDescription getDataSourceDescription() const override + { + auto delegate_description = delegate->getDataSourceDescription(); + delegate_description.is_encrypted = true; + return delegate_description; + } + bool isRemote() const override { return delegate->isRemote(); } SyncGuardPtr getDirectorySyncGuard(const String & path) const override; diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index 9015154a343..750d08ef80c 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -230,14 +230,14 @@ std::optional DiskLocal::tryReserve(UInt64 bytes) if (bytes == 0) { - LOG_DEBUG(log, "Reserving 0 bytes on disk {}", backQuote(name)); + LOG_DEBUG(logger, "Reserving 0 bytes on disk {}", backQuote(name)); ++reservation_count; return {unreserved_space}; } if (unreserved_space >= bytes) { - LOG_DEBUG(log, "Reserving {} on disk {}, having unreserved {}.", + LOG_DEBUG(logger, "Reserving {} on disk {}, having unreserved {}.", ReadableSize(bytes), backQuote(name), ReadableSize(unreserved_space)); ++reservation_count; reserved_bytes += bytes; @@ -497,6 +497,14 @@ DiskLocal::DiskLocal(const String & name_, const String & path_, UInt64 keep_fre , keep_free_space_bytes(keep_free_space_bytes_) , logger(&Poco::Logger::get("DiskLocal")) { + data_source_description.type = DataSourceType::Local; + + if (auto block_device_id = tryGetBlockDeviceId(disk_path); block_device_id.has_value()) + data_source_description.description = *block_device_id; + else + data_source_description.description = disk_path; + 
data_source_description.is_encrypted = false; + data_source_description.is_cached = false; } DiskLocal::DiskLocal( @@ -507,6 +515,11 @@ DiskLocal::DiskLocal( disk_checker = std::make_unique(this, context, local_disk_check_period_ms); } +DataSourceDescription DiskLocal::getDataSourceDescription() const +{ + return data_source_description; +} + void DiskLocal::startup(ContextPtr) { try @@ -615,7 +628,6 @@ DiskObjectStoragePtr DiskLocal::createDiskObjectStorage() "Local", metadata_storage, object_storage, - DiskType::Local, false, /* threadpool_size */16 ); @@ -714,6 +726,13 @@ void DiskLocal::chmod(const String & path, mode_t mode) DB::throwFromErrnoWithPath("Cannot chmod file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED); } +MetadataStoragePtr DiskLocal::getMetadataStorage() +{ + auto object_storage = std::make_shared(); + return std::make_shared( + std::static_pointer_cast(shared_from_this()), object_storage, getPath()); +} + void registerDiskLocal(DiskFactory & factory) { auto creator = [](const String & name, diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h index 9d3ce1d36b1..f79647b8541 100644 --- a/src/Disks/DiskLocal.h +++ b/src/Disks/DiskLocal.h @@ -101,7 +101,8 @@ public: void truncateFile(const String & path, size_t size) override; - DiskType getType() const override { return DiskType::Local; } + DataSourceDescription getDataSourceDescription() const override; + bool isRemote() const override { return false; } bool supportZeroCopyReplication() const override { return false; } @@ -130,6 +131,8 @@ public: bool supportsChmod() const override { return true; } void chmod(const String & path, mode_t mode) override; + MetadataStoragePtr getMetadataStorage() override; + private: std::optional tryReserve(UInt64 bytes); @@ -145,14 +148,13 @@ private: const String disk_checker_path = ".disk_checker_file"; std::atomic keep_free_space_bytes; Poco::Logger * logger; + DataSourceDescription data_source_description; UInt64 reserved_bytes = 0; UInt64 reservation_count = 0; static std::mutex reservation_mutex; - Poco::Logger * log = &Poco::Logger::get("DiskLocal"); - std::atomic broken{false}; std::atomic readonly{false}; std::unique_ptr disk_checker; diff --git a/src/Disks/DiskMemory.cpp b/src/Disks/DiskMemory.cpp index 9b857d617c9..f4ca2a7459a 100644 --- a/src/Disks/DiskMemory.cpp +++ b/src/Disks/DiskMemory.cpp @@ -7,6 +7,8 @@ #include #include +#include +#include namespace DB { @@ -443,6 +445,13 @@ void DiskMemory::truncateFile(const String & path, size_t size) file_it->second.data.resize(size); } +MetadataStoragePtr DiskMemory::getMetadataStorage() +{ + auto object_storage = std::make_shared(); + return std::make_shared( + std::static_pointer_cast(shared_from_this()), object_storage, getPath()); +} + using DiskMemoryPtr = std::shared_ptr; diff --git a/src/Disks/DiskMemory.h b/src/Disks/DiskMemory.h index 0a1b16a4fea..78fb52a768d 100644 --- a/src/Disks/DiskMemory.h +++ b/src/Disks/DiskMemory.h @@ -91,11 +91,14 @@ public: void truncateFile(const String & path, size_t size) override; - DiskType getType() const override { return DiskType::RAM; } + DataSourceDescription getDataSourceDescription() const override { return DataSourceDescription{DataSourceType::RAM, "", false, false}; } + bool isRemote() const override { return false; } bool supportZeroCopyReplication() const override { return false; } + MetadataStoragePtr getMetadataStorage() override; + private: void createDirectoriesImpl(const String & path); void replaceFileImpl(const String & from_path, const String & to_path); diff 
--git a/src/Disks/DiskType.cpp b/src/Disks/DiskType.cpp new file mode 100644 index 00000000000..92979ab505c --- /dev/null +++ b/src/Disks/DiskType.cpp @@ -0,0 +1,11 @@ +#include "DiskType.h" + +namespace DB +{ + +bool DataSourceDescription::operator==(const DataSourceDescription & other) const +{ + return std::tie(type, description, is_encrypted) == std::tie(other.type, other.description, other.is_encrypted); +} + +} diff --git a/src/Disks/DiskType.h b/src/Disks/DiskType.h index 2bb4be8f5c9..1a5c7312cb3 100644 --- a/src/Disks/DiskType.h +++ b/src/Disks/DiskType.h @@ -5,40 +5,45 @@ namespace DB { -enum class DiskType +enum class DataSourceType { Local, RAM, S3, HDFS, - Encrypted, WebServer, AzureBlobStorage, - Cache, }; -inline String toString(DiskType disk_type) +inline String toString(DataSourceType data_source_type) { - switch (disk_type) + switch (data_source_type) { - case DiskType::Local: + case DataSourceType::Local: return "local"; - case DiskType::RAM: + case DataSourceType::RAM: return "memory"; - case DiskType::S3: + case DataSourceType::S3: return "s3"; - case DiskType::HDFS: + case DataSourceType::HDFS: return "hdfs"; - case DiskType::Encrypted: - return "encrypted"; - case DiskType::WebServer: + case DataSourceType::WebServer: return "web"; - case DiskType::AzureBlobStorage: + case DataSourceType::AzureBlobStorage: return "azure_blob_storage"; - case DiskType::Cache: - return "cache"; } __builtin_unreachable(); } +struct DataSourceDescription +{ + DataSourceType type; + std::string description; + + bool is_encrypted = false; + bool is_cached = false; + + bool operator==(const DataSourceDescription & other) const; +}; + } diff --git a/src/Disks/IDisk.cpp b/src/Disks/IDisk.cpp index 7d89fb28271..3704a511478 100644 --- a/src/Disks/IDisk.cpp +++ b/src/Disks/IDisk.cpp @@ -113,7 +113,7 @@ void IDisk::copyDirectoryContent(const String & from_dir, const std::shared_ptr< void IDisk::truncateFile(const String &, size_t) { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Truncate operation is not implemented for disk of type {}", getType()); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Truncate operation is not implemented for disk of type {}", getDataSourceDescription().type); } SyncGuardPtr IDisk::getDirectorySyncGuard(const String & /* path */) const @@ -121,18 +121,4 @@ SyncGuardPtr IDisk::getDirectorySyncGuard(const String & /* path */) const return nullptr; } -MetadataStoragePtr IDisk::getMetadataStorage() -{ - if (isRemote()) - { - return std::make_shared(std::static_pointer_cast(shared_from_this()), ""); - } - else - { - auto object_storage = std::make_shared(); - return std::make_shared( - std::static_pointer_cast(shared_from_this()), object_storage, getPath()); - } -} - } diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 3e2b7535fcc..bfbdba0e050 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -227,7 +227,7 @@ public: virtual NameSet getCacheLayersNames() const { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `getCacheLayersNames()` is not implemented for disk: {}", getType()); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `getCacheLayersNames()` is not implemented for disk: {}", getDataSourceDescription().type); } /// Returns a list of storage objects (contains path, size, ...). @@ -235,7 +235,7 @@ public: /// be multiple files in remote fs for single clickhouse file. 
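For orientation, a minimal sketch of the semantics of the new DataSourceDescription (illustrative endpoints, not part of this diff): unlike the old DiskType enum, two disks of the same kind now compare equal only when they point at the same underlying source, and the operator== defined above considers is_encrypted but not is_cached.

    // Assumes the DataSourceDescription / DataSourceType declarations from DiskType.h above.
    DataSourceDescription a{DataSourceType::S3, "https://one.example.com/bucket", false, false};
    DataSourceDescription b = a;
    b.description = "https://two.example.com/bucket";
    assert(!(a == b));      // same type, different endpoint -> different data source

    b = a;
    b.is_cached = true;     // a cache layer still matches the source it wraps,
    assert(a == b);         // because operator== ignores is_cached

    b.is_encrypted = true;  // but encryption does participate in the comparison
    assert(!(a == b));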
virtual StoredObjects getStorageObjects(const String &) const { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `getStorageObjects() not implemented for disk: {}`", getType()); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `getStorageObjects() not implemented for disk: {}`", getDataSourceDescription().type); } /// For one local path there might be multiple remote paths in case of Log family engines. @@ -243,7 +243,7 @@ public: virtual void getRemotePathsRecursive(const String &, std::vector &) { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `getRemotePathsRecursive() not implemented for disk: {}`", getType()); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `getRemotePathsRecursive() not implemented for disk: {}`", getDataSourceDescription().type); } /// Batch request to remove multiple files. @@ -271,8 +271,8 @@ public: /// Truncate file to specified size. virtual void truncateFile(const String & path, size_t size); - /// Return disk type - "local", "s3", etc. - virtual DiskType getType() const = 0; + /// Return data source description + virtual DataSourceDescription getDataSourceDescription() const = 0; /// Involves network interaction. virtual bool isRemote() const = 0; @@ -321,7 +321,7 @@ public: /// Actually it's a part of IDiskRemote implementation but we have so /// complex hierarchy of disks (with decorators), so we cannot even /// dynamic_cast some pointer to IDisk to pointer to IDiskRemote. - virtual MetadataStoragePtr getMetadataStorage(); + virtual MetadataStoragePtr getMetadataStorage() = 0; /// Very similar case as for getMetadataDiskIfExistsOrSelf(). If disk has "metadata" /// it will return mapping for each required path: path -> metadata as string. @@ -357,7 +357,7 @@ public: throw Exception( ErrorCodes::NOT_IMPLEMENTED, "Method createDiskObjectStorage() is not implemented for disk type: {}", - getType()); + getDataSourceDescription().type); } virtual bool supportsStat() const { return false; } diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index d3777fbcbd4..dac59c596f5 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -222,9 +222,6 @@ CachedOnDiskReadBufferFromFile::getReadBufferForFileSegment(FileSegmentPtr & fil { auto range = file_segment->range(); - size_t wait_download_max_tries = settings.filesystem_cache_max_wait_sec; - size_t wait_download_tries = 0; - auto download_state = file_segment->state(); LOG_TEST(log, "getReadBufferForFileSegment: {}", file_segment->getInfoForLog()); @@ -274,16 +271,7 @@ CachedOnDiskReadBufferFromFile::getReadBufferForFileSegment(FileSegmentPtr & fil return getCacheReadBuffer(range.left); } - if (wait_download_tries++ < wait_download_max_tries) - { - download_state = file_segment->wait(); - } - else - { - LOG_DEBUG(log, "Retries to wait for file segment download exceeded ({})", wait_download_tries); - download_state = FileSegment::State::SKIP_CACHE; - } - + download_state = file_segment->wait(); continue; } case FileSegment::State::DOWNLOADED: diff --git a/src/Disks/IO/ThreadPoolReader.cpp b/src/Disks/IO/ThreadPoolReader.cpp index 8e51b1bbfbd..a175fa18380 100644 --- a/src/Disks/IO/ThreadPoolReader.cpp +++ b/src/Disks/IO/ThreadPoolReader.cpp @@ -165,8 +165,7 @@ std::future ThreadPoolReader::submit(Request reques { ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadFailed); promise.set_exception(std::make_exception_ptr(ErrnoException( - fmt::format("Cannot 
read from file {}, {}", fd, - errnoToString(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, errno)), + fmt::format("Cannot read from file {}, {}", fd, errnoToString()), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, errno))); return future; } diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index 7c88ea70cec..40f68b86e9d 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -32,11 +32,15 @@ AzureObjectStorage::AzureObjectStorage( , settings(std::move(settings_)) , log(&Poco::Logger::get("AzureObjectStorage")) { + data_source_description.type = DataSourceType::AzureBlobStorage; + data_source_description.description = client.get()->GetUrl(); + data_source_description.is_cached = false; + data_source_description.is_encrypted = false; } std::string AzureObjectStorage::generateBlobNameForPath(const std::string & /* path */) { - return getRandomASCIIString(); + return getRandomASCIIString(32); } bool AzureObjectStorage::exists(const StoredObject & object) const diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h index 9b5a16f6be5..4fc434e691b 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h @@ -9,7 +9,6 @@ #include #include #include -#include #include #if USE_AZURE_BLOB_STORAGE @@ -58,6 +57,8 @@ public: AzureClientPtr && client_, SettingsPtr && settings_); + DataSourceDescription getDataSourceDescription() const override { return data_source_description; } + std::string getName() const override { return "AzureObjectStorage"; } bool exists(const StoredObject & object) const override; @@ -129,6 +130,8 @@ private: MultiVersion settings; Poco::Logger * log; + + DataSourceDescription data_source_description; }; } diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp index 8ec93aedfef..e6615316391 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp @@ -90,7 +90,6 @@ void registerDiskAzureBlobStorage(DiskFactory & factory) "DiskAzureBlobStorage", std::move(metadata_storage), std::move(azure_object_storage), - DiskType::AzureBlobStorage, send_metadata, copy_thread_pool_size ); diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index f24b81bf2de..a68f9fdaf2d 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -34,6 +34,13 @@ CachedObjectStorage::CachedObjectStorage( cache->initialize(); } +DataSourceDescription CachedObjectStorage::getDataSourceDescription() const +{ + auto wrapped_object_storage_data_source = object_storage->getDataSourceDescription(); + wrapped_object_storage_data_source.is_cached = true; + return wrapped_object_storage_data_source; +} + FileCache::Key CachedObjectStorage::getCacheKey(const std::string & path) const { return cache->hash(path); diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h index f99addb97d4..258b70ed579 100644 --- 
a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h @@ -20,6 +20,8 @@ class CachedObjectStorage final : public IObjectStorage public: CachedObjectStorage(ObjectStoragePtr object_storage_, FileCachePtr cache_, const FileCacheSettings & cache_settings_, const String & cache_config_name_); + DataSourceDescription getDataSourceDescription() const override; + std::string getName() const override { return fmt::format("CachedObjectStorage-{}({})", cache_config_name, object_storage->getName()); } bool exists(const StoredObject & object) const override; diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp index 75579247752..bac0d8e4c12 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorage.cpp @@ -103,14 +103,12 @@ DiskObjectStorage::DiskObjectStorage( const String & log_name, MetadataStoragePtr metadata_storage_, ObjectStoragePtr object_storage_, - DiskType disk_type_, bool send_metadata_, uint64_t thread_pool_size_) : IDisk(getAsyncExecutor(log_name, thread_pool_size_)) , name(name_) , object_storage_root_path(object_storage_root_path_) , log (&Poco::Logger::get("DiskObjectStorage(" + log_name + ")")) - , disk_type(disk_type_) , metadata_storage(std::move(metadata_storage_)) , object_storage(std::move(object_storage_)) , send_metadata(send_metadata_) @@ -216,6 +214,22 @@ void DiskObjectStorage::moveFile(const String & from_path, const String & to_pat transaction->commit(); } + +void DiskObjectStorage::copy(const String & from_path, const std::shared_ptr & to_disk, const String & to_path) +{ + /// It's the same object storage disk + if (this == to_disk.get()) + { + auto transaction = createObjectStorageTransaction(); + transaction->copyFile(from_path, to_path); + transaction->commit(); + } + else + { + IDisk::copy(from_path, to_disk, to_path); + } +} + void DiskObjectStorage::moveFile(const String & from_path, const String & to_path) { moveFile(from_path, to_path, send_metadata); @@ -469,7 +483,6 @@ DiskObjectStoragePtr DiskObjectStorage::createDiskObjectStorage() getName(), metadata_storage, object_storage, - disk_type, send_metadata, threadpool_size); } diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.h b/src/Disks/ObjectStorages/DiskObjectStorage.h index 1b53cd514a0..6e2d719e3ad 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.h +++ b/src/Disks/ObjectStorages/DiskObjectStorage.h @@ -34,14 +34,13 @@ public: const String & log_name, MetadataStoragePtr metadata_storage_, ObjectStoragePtr object_storage_, - DiskType disk_type_, bool send_metadata_, uint64_t thread_pool_size_); /// Create fake transaction DiskTransactionPtr createTransaction() override; - DiskType getType() const override { return disk_type; } + DataSourceDescription getDataSourceDescription() const override { return object_storage->getDataSourceDescription(); } bool supportZeroCopyReplication() const override { return true; } @@ -154,6 +153,8 @@ public: WriteMode mode, const WriteSettings & settings) override; + void copy(const String & from_path, const std::shared_ptr & to_disk, const String & to_path) override; + void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context_, const String &, const DisksMap &) override; void restoreMetadataIfNeeded(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, ContextPtr context); @@ -206,7 +207,6 @@ private: const String 
object_storage_root_path; Poco::Logger * log; - const DiskType disk_type; MetadataStoragePtr metadata_storage; ObjectStoragePtr object_storage; diff --git a/src/Disks/ObjectStorages/DiskObjectStorageCommon.h b/src/Disks/ObjectStorages/DiskObjectStorageCommon.h index 56c73649c1f..0bdbe0dfd36 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageCommon.h +++ b/src/Disks/ObjectStorages/DiskObjectStorageCommon.h @@ -5,10 +5,10 @@ #include #include -#include #include + namespace DB { diff --git a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp index 28015ff0b04..54d5a2f2368 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp @@ -1,10 +1,10 @@ #include #include #include -#include #include #include + namespace DB { diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp index 78bbed21e39..2303401466d 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp @@ -35,7 +35,7 @@ void HDFSObjectStorage::startup() std::string HDFSObjectStorage::generateBlobNameForPath(const std::string & /* path */) { - return getRandomASCIIString(); + return getRandomASCIIString(32); } bool HDFSObjectStorage::exists(const StoredObject & object) const diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h index 4687d63c128..bbf2f593a68 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h @@ -48,10 +48,20 @@ public: , hdfs_builder(createHDFSBuilder(hdfs_root_path_, config)) , hdfs_fs(createHDFSFS(hdfs_builder.get())) , settings(std::move(settings_)) - {} + { + data_source_description.type = DataSourceType::HDFS; + data_source_description.description = hdfs_root_path_; + data_source_description.is_cached = false; + data_source_description.is_encrypted = false; + } std::string getName() const override { return "HDFSObjectStorage"; } + DataSourceDescription getDataSourceDescription() const override + { + return data_source_description; + } + bool exists(const StoredObject & object) const override; std::unique_ptr readObject( /// NOLINT @@ -121,6 +131,8 @@ private: HDFSFSPtr hdfs_fs; SettingsPtr settings; + + DataSourceDescription data_source_description; }; } diff --git a/src/Disks/ObjectStorages/HDFS/registerDiskHDFS.cpp b/src/Disks/ObjectStorages/HDFS/registerDiskHDFS.cpp index a2e9fb3e4ad..a9189e0101b 100644 --- a/src/Disks/ObjectStorages/HDFS/registerDiskHDFS.cpp +++ b/src/Disks/ObjectStorages/HDFS/registerDiskHDFS.cpp @@ -49,7 +49,6 @@ void registerDiskHDFS(DiskFactory & factory) "DiskHDFS", std::move(metadata_storage), std::move(hdfs_storage), - DiskType::HDFS, /* send_metadata = */ false, copy_thread_pool_size); diff --git a/src/Disks/ObjectStorages/IObjectStorage.h b/src/Disks/ObjectStorages/IObjectStorage.h index ae5a2587ddd..89d0e0d6d03 100644 --- a/src/Disks/ObjectStorages/IObjectStorage.h +++ b/src/Disks/ObjectStorages/IObjectStorage.h @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -58,6 +59,8 @@ class IObjectStorage public: IObjectStorage() = default; + virtual DataSourceDescription getDataSourceDescription() const = 0; + virtual std::string getName() const = 0; /// Object exists or not diff --git a/src/Disks/ObjectStorages/LocalObjectStorage.cpp 
b/src/Disks/ObjectStorages/LocalObjectStorage.cpp index a25f2ba18c4..64512a07919 100644 --- a/src/Disks/ObjectStorages/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/LocalObjectStorage.cpp @@ -28,6 +28,14 @@ namespace ErrorCodes LocalObjectStorage::LocalObjectStorage() : log(&Poco::Logger::get("LocalObjectStorage")) { + data_source_description.type = DataSourceType::Local; + if (auto block_device_id = tryGetBlockDeviceId("/"); block_device_id.has_value()) + data_source_description.description = *block_device_id; + else + data_source_description.description = "/"; + + data_source_description.is_cached = false; + data_source_description.is_encrypted = false; } bool LocalObjectStorage::exists(const StoredObject & object) const diff --git a/src/Disks/ObjectStorages/LocalObjectStorage.h b/src/Disks/ObjectStorages/LocalObjectStorage.h index 2225d0c72b7..de38581e7bb 100644 --- a/src/Disks/ObjectStorages/LocalObjectStorage.h +++ b/src/Disks/ObjectStorages/LocalObjectStorage.h @@ -17,6 +17,8 @@ class LocalObjectStorage : public IObjectStorage public: LocalObjectStorage(); + DataSourceDescription getDataSourceDescription() const override { return data_source_description; } + std::string getName() const override { return "LocalObjectStorage"; } bool exists(const StoredObject & object) const override; @@ -86,6 +88,7 @@ public: private: Poco::Logger * log; + DataSourceDescription data_source_description; }; } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromDisk.cpp b/src/Disks/ObjectStorages/MetadataStorageFromDisk.cpp index cdd0fdc8457..0ad46a1327d 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromDisk.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromDisk.cpp @@ -1,13 +1,12 @@ #include #include -#include #include #include #include -#include #include #include + namespace DB { diff --git a/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.cpp index 4a3f76cb2aa..39c1b020b3a 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.cpp @@ -15,7 +15,7 @@ namespace DB static std::string getTempFileName(const std::string & dir) { - return fs::path(dir) / getRandomASCIIString(); + return fs::path(dir) / getRandomASCIIString(32); } SetLastModifiedOperation::SetLastModifiedOperation(const std::string & path_, Poco::Timestamp new_timestamp_, IDisk & disk_) diff --git a/src/Disks/ObjectStorages/S3/ProxyResolverConfiguration.cpp b/src/Disks/ObjectStorages/S3/ProxyResolverConfiguration.cpp index 109ccf0eba7..9648a263741 100644 --- a/src/Disks/ObjectStorages/S3/ProxyResolverConfiguration.cpp +++ b/src/Disks/ObjectStorages/S3/ProxyResolverConfiguration.cpp @@ -54,9 +54,6 @@ ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const const auto & host = endpoint.getHost(); auto resolved_hosts = DNSResolver::instance().resolveHostAll(host); - if (resolved_hosts.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Proxy resolver cannot resolve host {}", host); - HTTPSessionPtr session; for (size_t i = 0; i < resolved_hosts.size(); ++i) @@ -68,6 +65,7 @@ ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const try { session->sendRequest(request); + break; } catch (...) 
{ diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index a18275ff5ff..ccde7d20778 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -2,6 +2,8 @@ #if USE_AWS_S3 +#include + #include #include #include @@ -25,6 +27,7 @@ #include #include + #include #include @@ -87,7 +90,7 @@ void logIfError(const Aws::Utils::Outcome & response, std::functi std::string S3ObjectStorage::generateBlobNameForPath(const std::string & /* path */) { - return getRandomASCIIString(); + return getRandomASCIIString(32); } Aws::S3::Model::HeadObjectOutcome S3ObjectStorage::requestObjectHeadData(const std::string & bucket_from, const std::string & key) const @@ -369,6 +372,15 @@ void S3ObjectStorage::copyObjectImpl( } throwIfError(outcome); + + auto settings_ptr = s3_settings.get(); + if (settings_ptr->s3_settings.check_objects_after_upload) + { + auto object_head = requestObjectHeadData(dst_bucket, dst_key); + if (!object_head.IsSuccess()) + throw Exception(ErrorCodes::S3_ERROR, "Object {} from bucket {} disappeared immediately after upload, it's a bug in S3 or S3 API.", dst_key, dst_bucket); + } + } void S3ObjectStorage::copyObjectMultipartImpl( @@ -450,6 +462,14 @@ void S3ObjectStorage::copyObjectMultipartImpl( throwIfError(outcome); } + + if (settings_ptr->s3_settings.check_objects_after_upload) + { + auto object_head = requestObjectHeadData(dst_bucket, dst_key); + if (!object_head.IsSuccess()) + throw Exception(ErrorCodes::S3_ERROR, "Object {} from bucket {} disappeared immediately after upload, it's a bug in S3 or S3 API.", dst_key, dst_bucket); + } + } void S3ObjectStorage::copyObject( // NOLINT @@ -511,7 +531,8 @@ std::unique_ptr S3ObjectStorage::cloneObjectStorage( return std::make_unique( getClient(config, config_prefix, context), getSettings(config, config_prefix, context), - version_id, s3_capabilities, new_namespace); + version_id, s3_capabilities, new_namespace, + S3::URI(Poco::URI(config.getString(config_prefix + ".endpoint"))).endpoint); } } diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index 63b254b719a..448826bfa71 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -48,13 +48,23 @@ public: std::unique_ptr && s3_settings_, String version_id_, const S3Capabilities & s3_capabilities_, - String bucket_) + String bucket_, + String connection_string) : bucket(bucket_) , client(std::move(client_)) , s3_settings(std::move(s3_settings_)) , s3_capabilities(s3_capabilities_) , version_id(std::move(version_id_)) { + data_source_description.type = DataSourceType::S3; + data_source_description.description = connection_string; + data_source_description.is_cached = false; + data_source_description.is_encrypted = false; + } + + DataSourceDescription getDataSourceDescription() const override + { + return data_source_description; } std::string getName() const override { return "S3ObjectStorage"; } @@ -169,6 +179,8 @@ private: S3Capabilities s3_capabilities; const String version_id; + + DataSourceDescription data_source_description; }; } diff --git a/src/Disks/ObjectStorages/S3/diskSettings.cpp b/src/Disks/ObjectStorages/S3/diskSettings.cpp index 11f7b2e8ad7..4cdb867e48c 100644 --- a/src/Disks/ObjectStorages/S3/diskSettings.cpp +++ b/src/Disks/ObjectStorages/S3/diskSettings.cpp @@ -40,6 +40,7 @@ std::unique_ptr getSettings(const Poco::Util::AbstractC 
rw_settings.upload_part_size_multiply_factor = config.getUInt64(config_prefix + ".s3_upload_part_size_multiply_factor", context->getSettingsRef().s3_upload_part_size_multiply_factor); rw_settings.upload_part_size_multiply_parts_count_threshold = config.getUInt64(config_prefix + ".s3_upload_part_size_multiply_parts_count_threshold", context->getSettingsRef().s3_upload_part_size_multiply_parts_count_threshold); rw_settings.max_single_part_upload_size = config.getUInt64(config_prefix + ".s3_max_single_part_upload_size", context->getSettingsRef().s3_max_single_part_upload_size); + rw_settings.check_objects_after_upload = config.getUInt64(config_prefix + ".s3_check_objects_after_upload", context->getSettingsRef().s3_check_objects_after_upload); return std::make_unique( rw_settings, diff --git a/src/Disks/ObjectStorages/S3/registerDiskS3.cpp b/src/Disks/ObjectStorages/S3/registerDiskS3.cpp index 37d23d87c0f..12e4df42863 100644 --- a/src/Disks/ObjectStorages/S3/registerDiskS3.cpp +++ b/src/Disks/ObjectStorages/S3/registerDiskS3.cpp @@ -15,9 +15,6 @@ #include #include #include -#include -#include -#include #include #include #include @@ -133,7 +130,7 @@ void registerDiskS3(DiskFactory & factory) auto s3_storage = std::make_unique( getClient(config, config_prefix, context), getSettings(config, config_prefix, context), - uri.version_id, s3_capabilities, uri.bucket); + uri.version_id, s3_capabilities, uri.bucket, uri.endpoint); bool skip_access_check = config.getBool(config_prefix + ".skip_access_check", false); @@ -162,7 +159,6 @@ void registerDiskS3(DiskFactory & factory) "DiskS3", std::move(metadata_storage), std::move(s3_storage), - DiskType::S3, send_metadata, copy_thread_pool_size); diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.h b/src/Disks/ObjectStorages/Web/WebObjectStorage.h index fd9e9e9ce42..7d0380a37fd 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.h +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.h @@ -20,6 +20,16 @@ class WebObjectStorage : public IObjectStorage, WithContext public: WebObjectStorage(const String & url_, ContextPtr context_); + DataSourceDescription getDataSourceDescription() const override + { + return DataSourceDescription{ + .type = DataSourceType::WebServer, + .description = url, + .is_encrypted = false, + .is_cached = false, + }; + } + std::string getName() const override { return "WebObjectStorage"; } bool exists(const StoredObject & object) const override; diff --git a/src/Disks/ObjectStorages/Web/registerDiskWebServer.cpp b/src/Disks/ObjectStorages/Web/registerDiskWebServer.cpp index 1051440e16c..5ef3fad4a0a 100644 --- a/src/Disks/ObjectStorages/Web/registerDiskWebServer.cpp +++ b/src/Disks/ObjectStorages/Web/registerDiskWebServer.cpp @@ -47,7 +47,6 @@ void registerDiskWebServer(DiskFactory & factory) "DiskWebServer", metadata_storage, object_storage, - DiskType::WebServer, /* send_metadata */false, /* threadpool_size */16); }; diff --git a/src/Disks/TemporaryFileOnDisk.cpp b/src/Disks/TemporaryFileOnDisk.cpp index 6251bff23a1..e63500d735f 100644 --- a/src/Disks/TemporaryFileOnDisk.cpp +++ b/src/Disks/TemporaryFileOnDisk.cpp @@ -1,17 +1,39 @@ #include -#include #include +#include +namespace ProfileEvents +{ + extern const Event ExternalProcessingFilesTotal; +} namespace DB { +TemporaryFileOnDisk::TemporaryFileOnDisk(const DiskPtr & disk_) + : TemporaryFileOnDisk(disk_, disk_->getPath()) +{} + +TemporaryFileOnDisk::TemporaryFileOnDisk(const DiskPtr & disk_, CurrentMetrics::Value metric_scope) + : TemporaryFileOnDisk(disk_) +{ + 
sub_metric_increment.emplace(metric_scope); +} + TemporaryFileOnDisk::TemporaryFileOnDisk(const DiskPtr & disk_, const String & prefix_) : disk(disk_) { + /// Is it possible to use this with a disk other than DiskLocal? + disk->createDirectories(prefix_); + + ProfileEvents::increment(ProfileEvents::ExternalProcessingFilesTotal); + + /// Do not use the default temporary root path `/tmp/tmpXXXXXX`. + /// The `dummy_prefix` is used to know what to replace with the real prefix. String dummy_prefix = "a/"; filepath = Poco::TemporaryFile::tempName(dummy_prefix); dummy_prefix += "tmp"; + /// a/tmpXXXXX -> <prefix_>XXXXX assert(filepath.starts_with(dummy_prefix)); filepath.replace(0, dummy_prefix.length(), prefix_); } diff --git a/src/Disks/TemporaryFileOnDisk.h b/src/Disks/TemporaryFileOnDisk.h index b82cb7d2254..de20481c939 100644 --- a/src/Disks/TemporaryFileOnDisk.h +++ b/src/Disks/TemporaryFileOnDisk.h @@ -2,28 +2,46 @@ #include #include +#include +#include + + +namespace CurrentMetrics +{ + extern const Metric TotalTemporaryFiles; +} namespace DB { -class IDisk; using DiskPtr = std::shared_ptr; /// This class helps with the handling of temporary files or directories. /// A unique name for the temporary file or directory is automatically chosen based on a specified prefix. -/// Optionally can create a directory in the constructor. +/// Creates a directory in the constructor. /// The destructor always removes the temporary file or directory with all contained files. class TemporaryFileOnDisk { public: - explicit TemporaryFileOnDisk(const DiskPtr & disk_, const String & prefix_ = "tmp"); + explicit TemporaryFileOnDisk(const DiskPtr & disk_); + explicit TemporaryFileOnDisk(const DiskPtr & disk_, CurrentMetrics::Value metric_scope); + explicit TemporaryFileOnDisk(const DiskPtr & disk_, const String & prefix_); + ~TemporaryFileOnDisk(); DiskPtr getDisk() const { return disk; } const String & getPath() const { return filepath; } + const String & path() const { return filepath; } private: DiskPtr disk; + String filepath; + + CurrentMetrics::Increment metric_increment{CurrentMetrics::TotalTemporaryFiles};
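/// Worked example of the dummy-prefix trick in the constructor above
/// (illustrative values, not part of this diff), assuming prefix_ == "tmpdir/"
/// and Poco::TemporaryFile::tempName() returning "a/tmpAbC123":
///
///     String dummy_prefix = "a/";                                    // throwaway root
///     String filepath = Poco::TemporaryFile::tempName(dummy_prefix); // "a/tmpAbC123"
///     dummy_prefix += "tmp";                                         // "a/tmp"
///     filepath.replace(0, dummy_prefix.length(), "tmpdir/");         // "tmpdir/AbC123"
///
/// Only the random suffix survives; the dummy root exists solely so we know
/// how many leading characters to replace with the real prefix.
+ /// Set when we know what the file is used for (sort/aggregate/join).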
+ std::optional sub_metric_increment = {}; }; +using TemporaryFileOnDiskHolder = std::unique_ptr; + } diff --git a/src/Formats/EscapingRuleUtils.cpp b/src/Formats/EscapingRuleUtils.cpp index a5c4dd1dd47..b0033f7d228 100644 --- a/src/Formats/EscapingRuleUtils.cpp +++ b/src/Formats/EscapingRuleUtils.cpp @@ -697,7 +697,7 @@ DataTypePtr determineDataTypeByEscapingRule(const String & field, const FormatSe return JSONUtils::getDataTypeFromField(field, format_settings); case FormatSettings::EscapingRule::CSV: { - if (!format_settings.csv.input_format_use_best_effort_in_schema_inference) + if (!format_settings.csv.use_best_effort_in_schema_inference) return makeNullable(std::make_shared()); if (field.empty() || field == format_settings.csv.null_representation) @@ -745,7 +745,7 @@ DataTypePtr determineDataTypeByEscapingRule(const String & field, const FormatSe case FormatSettings::EscapingRule::Raw: [[fallthrough]]; case FormatSettings::EscapingRule::Escaped: { - if (!format_settings.tsv.input_format_use_best_effort_in_schema_inference) + if (!format_settings.tsv.use_best_effort_in_schema_inference) return makeNullable(std::make_shared()); if (field.empty() || field == format_settings.tsv.null_representation) @@ -799,4 +799,49 @@ DataTypes getDefaultDataTypeForEscapingRules(const std::vector * numbers_parsed_from_json_strings = nullptr); void transformInferredJSONTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings); +String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings,FormatSettings::EscapingRule escaping_rule); + } diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 149ce1711ca..780b6bb6201 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -63,10 +63,10 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.delimiter = settings.format_csv_delimiter; format_settings.csv.tuple_delimiter = settings.format_csv_delimiter; format_settings.csv.empty_as_default = settings.input_format_csv_empty_as_default; - format_settings.csv.input_format_enum_as_number = settings.input_format_csv_enum_as_number; + format_settings.csv.enum_as_number = settings.input_format_csv_enum_as_number; format_settings.csv.null_representation = settings.format_csv_null_representation; - format_settings.csv.input_format_arrays_as_nested_csv = settings.input_format_csv_arrays_as_nested_csv; - format_settings.csv.input_format_use_best_effort_in_schema_inference = settings.input_format_csv_use_best_effort_in_schema_inference; + format_settings.csv.arrays_as_nested_csv = settings.input_format_csv_arrays_as_nested_csv; + format_settings.csv.use_best_effort_in_schema_inference = settings.input_format_csv_use_best_effort_in_schema_inference; format_settings.csv.skip_first_lines = settings.input_format_csv_skip_first_lines; format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter; format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter; @@ -124,9 +124,9 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.template_settings.row_format = settings.format_template_row; format_settings.tsv.crlf_end_of_line = settings.output_format_tsv_crlf_end_of_line; format_settings.tsv.empty_as_default = settings.input_format_tsv_empty_as_default; - format_settings.tsv.input_format_enum_as_number = settings.input_format_tsv_enum_as_number; + 
format_settings.tsv.enum_as_number = settings.input_format_tsv_enum_as_number; format_settings.tsv.null_representation = settings.format_tsv_null_representation; - format_settings.tsv.input_format_use_best_effort_in_schema_inference = settings.input_format_tsv_use_best_effort_in_schema_inference; + format_settings.tsv.use_best_effort_in_schema_inference = settings.input_format_tsv_use_best_effort_in_schema_inference; format_settings.tsv.skip_first_lines = settings.input_format_tsv_skip_first_lines; format_settings.values.accurate_types_of_literals = settings.input_format_values_accurate_types_of_literals; format_settings.values.deduce_templates_of_expressions = settings.input_format_values_deduce_templates_of_expressions; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index a2bb28d4aff..9466a64590d 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -111,11 +111,11 @@ struct FormatSettings bool allow_double_quotes = true; bool empty_as_default = false; bool crlf_end_of_line = false; - bool input_format_enum_as_number = false; - bool input_format_arrays_as_nested_csv = false; + bool enum_as_number = false; + bool arrays_as_nested_csv = false; String null_representation = "\\N"; char tuple_delimiter = ','; - bool input_format_use_best_effort_in_schema_inference = true; + bool use_best_effort_in_schema_inference = true; UInt64 skip_first_lines = 0; } csv; @@ -227,8 +227,8 @@ struct FormatSettings bool empty_as_default = false; bool crlf_end_of_line = false; String null_representation = "\\N"; - bool input_format_enum_as_number = false; - bool input_format_use_best_effort_in_schema_inference = true; + bool enum_as_number = false; + bool use_best_effort_in_schema_inference = true; UInt64 skip_first_lines = 0; } tsv; diff --git a/src/Formats/ReadSchemaUtils.cpp b/src/Formats/ReadSchemaUtils.cpp index 22eef5bd75b..d09cb9ff9ad 100644 --- a/src/Formats/ReadSchemaUtils.cpp +++ b/src/Formats/ReadSchemaUtils.cpp @@ -239,26 +239,17 @@ NamesAndTypesList getNamesAndRecursivelyNullableTypes(const Block & header) return result; } -String getKeyForSchemaCache(const String & source, const String & format, const std::optional & format_settings, const ContextPtr & context) +SchemaCache::Key getKeyForSchemaCache(const String & source, const String & format, const std::optional & format_settings, const ContextPtr & context) { return getKeysForSchemaCache({source}, format, format_settings, context).front(); } -static String makeSchemaCacheKey(const String & source, const String & format, const String & additional_format_info) +static SchemaCache::Key makeSchemaCacheKey(const String & source, const String & format, const String & additional_format_info) { - return source + "@@" + format + "@@" + additional_format_info; + return SchemaCache::Key{source, format, additional_format_info}; } -void splitSchemaCacheKey(const String & key, String & source, String & format, String & additional_format_info) -{ - size_t additional_format_info_pos = key.rfind("@@"); - additional_format_info = key.substr(additional_format_info_pos + 2, key.size() - additional_format_info_pos - 2); - size_t format_pos = key.rfind("@@", additional_format_info_pos - 1); - format = key.substr(format_pos + 2, additional_format_info_pos - format_pos - 2); - source = key.substr(0, format_pos); -} - -Strings getKeysForSchemaCache(const Strings & sources, const String & format, const std::optional & format_settings, const ContextPtr & context) +SchemaCache::Keys getKeysForSchemaCache(const Strings & 
sources, const String & format, const std::optional & format_settings, const ContextPtr & context) { /// For some formats data schema depends on some settings, so it's possible that /// two queries to the same source will get two different schemas. To process this @@ -266,7 +257,7 @@ Strings getKeysForSchemaCache(const Strings & sources, const String & format, co /// For example, for Protobuf format additional information is the path to the schema /// and message name. String additional_format_info = FormatFactory::instance().getAdditionalInfoForSchemaCache(format, context, format_settings); - Strings cache_keys; + SchemaCache::Keys cache_keys; cache_keys.reserve(sources.size()); std::transform(sources.begin(), sources.end(), std::back_inserter(cache_keys), [&](const auto & source){ return makeSchemaCacheKey(source, format, additional_format_info); }); return cache_keys; diff --git a/src/Formats/ReadSchemaUtils.h b/src/Formats/ReadSchemaUtils.h index 56b48823464..6e731d9dd9e 100644 --- a/src/Formats/ReadSchemaUtils.h +++ b/src/Formats/ReadSchemaUtils.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include namespace DB @@ -47,8 +48,8 @@ DataTypePtr makeNullableRecursivelyAndCheckForNothing(DataTypePtr type); /// in the block and return names and types. NamesAndTypesList getNamesAndRecursivelyNullableTypes(const Block & header); -String getKeyForSchemaCache(const String & source, const String & format, const std::optional & format_settings, const ContextPtr & context); -Strings getKeysForSchemaCache(const Strings & sources, const String & format, const std::optional & format_settings, const ContextPtr & context); +SchemaCache::Key getKeyForSchemaCache(const String & source, const String & format, const std::optional & format_settings, const ContextPtr & context); +SchemaCache::Keys getKeysForSchemaCache(const Strings & sources, const String & format, const std::optional & format_settings, const ContextPtr & context); void splitSchemaCacheKey(const String & key, String & source, String & format, String & additional_format_info); } diff --git a/src/Formats/TemporaryFileStream.cpp b/src/Formats/TemporaryFileStream.cpp index b19c4aeff35..f4c66b67a45 100644 --- a/src/Formats/TemporaryFileStream.cpp +++ b/src/Formats/TemporaryFileStream.cpp @@ -25,7 +25,7 @@ TemporaryFileStream::TemporaryFileStream(const std::string & path, const Block & {} /// Flush data from input stream into file for future reading -void TemporaryFileStream::write(const std::string & path, const Block & header, QueryPipelineBuilder builder, const std::string & codec) +TemporaryFileStream::Stat TemporaryFileStream::write(const std::string & path, const Block & header, QueryPipelineBuilder builder, const std::string & codec) { WriteBufferFromFile file_buf(path); CompressedWriteBuffer compressed_buf(file_buf, CompressionCodecFactory::instance().get(codec, {})); @@ -39,6 +39,7 @@ void TemporaryFileStream::write(const std::string & path, const Block & header, output.write(block); compressed_buf.finalize(); + return Stat{compressed_buf.getCompressedBytes(), compressed_buf.getUncompressedBytes()}; } } diff --git a/src/Formats/TemporaryFileStream.h b/src/Formats/TemporaryFileStream.h index 4a2aa2d55e0..e858da1dc33 100644 --- a/src/Formats/TemporaryFileStream.h +++ b/src/Formats/TemporaryFileStream.h @@ -12,6 +12,12 @@ namespace DB /// To read the data that was flushed into the temporary data file. 
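/// Usage sketch for the Stat-returning write() declared below (illustrative,
/// not from this diff; the codec string and the logging call are assumptions):
///
///     auto stat = TemporaryFileStream::write(path, header, std::move(builder), "LZ4");
///     LOG_DEBUG(log, "Spilled {} compressed / {} uncompressed bytes to {}",
///         stat.compressed_bytes, stat.uncompressed_bytes, path);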
struct TemporaryFileStream { + struct Stat + { + size_t compressed_bytes = 0; + size_t uncompressed_bytes = 0; + }; + ReadBufferFromFile file_in; CompressedReadBuffer compressed_in; std::unique_ptr block_in; @@ -20,7 +26,7 @@ struct TemporaryFileStream TemporaryFileStream(const std::string & path, const Block & header_); /// Flush data from input stream into file for future reading - static void write(const std::string & path, const Block & header, QueryPipelineBuilder builder, const std::string & codec); + static Stat write(const std::string & path, const Block & header, QueryPipelineBuilder builder, const std::string & codec); }; } diff --git a/src/Functions/CRC.cpp b/src/Functions/CRC.cpp index 10045a246c0..92f0130c19b 100644 --- a/src/Functions/CRC.cpp +++ b/src/Functions/CRC.cpp @@ -140,7 +140,7 @@ using FunctionCRC64ECMA = FunctionCRC; template void registerFunctionCRCImpl(FunctionFactory & factory) { - factory.registerFunction(T::name, FunctionFactory::CaseInsensitive); + factory.registerFunction(T::name, {}, FunctionFactory::CaseInsensitive); } REGISTER_FUNCTION(CRC) diff --git a/src/Functions/CastOverloadResolver.cpp b/src/Functions/CastOverloadResolver.cpp index 761e49fe7bd..20a08e3b60b 100644 --- a/src/Functions/CastOverloadResolver.cpp +++ b/src/Functions/CastOverloadResolver.cpp @@ -7,11 +7,10 @@ namespace DB REGISTER_FUNCTION(CastOverloadResolvers) { - factory.registerFunction>(FunctionFactory::CaseInsensitive); - factory.registerFunction>(); - factory.registerFunction>(); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); + /// Note: "internal" (not affected by null preserving setting) versions of accurate cast functions are unneeded. - factory.registerFunction>(FunctionFactory::CaseInsensitive); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); factory.registerFunction>(); factory.registerFunction>(); } diff --git a/src/Functions/CastOverloadResolver.h b/src/Functions/CastOverloadResolver.h index e62020e10fe..6aa3d97ff0a 100644 --- a/src/Functions/CastOverloadResolver.h +++ b/src/Functions/CastOverloadResolver.h @@ -9,14 +9,13 @@ namespace ErrorCodes extern const int ILLEGAL_TYPE_OF_ARGUMENT; } -/* - * CastInternal does not preserve nullability of the data type, - * i.e. CastInternal(toNullable(toInt8(1)) as Int32) will be Int32(1). - * - * Cast preserves nullability according to setting `cast_keep_nullable`, - * i.e. Cast(toNullable(toInt8(1)) as Int32) will be Nullable(Int32(1)) if `cast_keep_nullable` == 1. -**/ -template +/** CastInternal does not preserve nullability of the data type, + * i.e. CastInternal(toNullable(toInt8(1)) as Int32) will be Int32(1). + * + * Cast preserves nullability according to setting `cast_keep_nullable`, + * i.e. Cast(toNullable(toInt8(1)) as Int32) will be Nullable(Int32(1)) if `cast_keep_nullable` == 1. 
+ */ +template class CastOverloadResolverImpl : public IFunctionOverloadResolver { public: diff --git a/src/Functions/CustomWeekTransforms.h b/src/Functions/CustomWeekTransforms.h index c296c8228b1..3378aec02d5 100644 --- a/src/Functions/CustomWeekTransforms.h +++ b/src/Functions/CustomWeekTransforms.h @@ -62,7 +62,10 @@ struct ToStartOfWeekImpl static inline UInt16 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); + if (t < 0) + return 0; + + return time_zone.toFirstDayNumOfWeek(DayNum(std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM))), week_mode); } static inline UInt16 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone) { @@ -70,7 +73,10 @@ struct ToStartOfWeekImpl } static inline UInt16 execute(Int32 d, UInt8 week_mode, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfWeek(ExtendedDayNum(d), week_mode); + if (d < 0) + return 0; + + return time_zone.toFirstDayNumOfWeek(DayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM))), week_mode); } static inline UInt16 execute(UInt16 d, UInt8 week_mode, const DateLUTImpl & time_zone) { diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 065f08296d0..66d57f2463f 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -61,15 +61,15 @@ struct ToDateImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - return UInt16(time_zone.toDayNum(t)); + return t < 0 ? 0 : std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { - return UInt16(time_zone.toDayNum(t)); + return time_zone.toDayNum(t); } - static inline UInt16 execute(Int32, const DateLUTImpl &) + static inline UInt16 execute(Int32 t, const DateLUTImpl &) { - return dateIsNotSupported(name); + return t < 0 ? 0 : std::min(t, Int32(DATE_LUT_MAX_DAY_NUM)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl &) { @@ -111,7 +111,10 @@ struct ToStartOfDayImpl //TODO: right now it is hardcoded to produce DateTime only, needs fixing later. See date_and_time_type_details::ResultDataTypeMap for deduction of result type example. static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { - return time_zone.toDate(static_cast(t.whole)); + if (t.whole < 0 || (t.whole >= 0 && t.fractional < 0)) + return 0; + + return time_zone.toDate(std::min(t.whole, Int64(0xffffffff))); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -119,11 +122,19 @@ struct ToStartOfDayImpl } static inline UInt32 execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toDate(ExtendedDayNum(d)); + if (d < 0) + return 0; + + auto date_time = time_zone.fromDayNum(ExtendedDayNum(d)); + if (date_time <= 0xffffffff) + return date_time; + else + return time_zone.toDate(0xffffffff); } static inline UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toDate(DayNum(d)); + auto date_time = time_zone.fromDayNum(ExtendedDayNum(d)); + return date_time < 0xffffffff ? date_time : time_zone.toDate(0xffffffff); } using FactorTransform = ZeroTransform; @@ -135,17 +146,16 @@ struct ToMondayImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - //return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t)); - return time_zone.toFirstDayNumOfWeek(t); + return t < 0 ? 
0 : time_zone.toFirstDayNumOfWeek(ExtendedDayNum( + std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM)))); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { - //return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t)); return time_zone.toFirstDayNumOfWeek(t); } static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfWeek(ExtendedDayNum(d)); + return d < 0 ? 0 : time_zone.toFirstDayNumOfWeek(ExtendedDayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM)))); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -161,15 +171,15 @@ struct ToStartOfMonthImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfMonth(time_zone.toDayNum(t)); + return t < 0 ? 0 : time_zone.toFirstDayNumOfMonth(ExtendedDayNum(std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM)))); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfMonth(time_zone.toDayNum(t)); + return time_zone.toFirstDayNumOfMonth(ExtendedDayNum(time_zone.toDayNum(t))); } static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfMonth(ExtendedDayNum(d)); + return d < 0 ? 0 : time_zone.toFirstDayNumOfMonth(ExtendedDayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM)))); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -185,7 +195,11 @@ struct ToLastDayOfMonthImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toLastDayNumOfMonth(time_zone.toDayNum(t)); + if (t < 0) + return 0; + + /// 0xFFF9 is Int value for 2149-05-31 -- the last day where we can actually find LastDayOfMonth. This will also be the return value. + return time_zone.toLastDayNumOfMonth(ExtendedDayNum(std::min(Int32(time_zone.toDayNum(t)), Int32(0xFFF9)))); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -193,11 +207,16 @@ struct ToLastDayOfMonthImpl } static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toLastDayNumOfMonth(ExtendedDayNum(d)); + if (d < 0) + return 0; + + /// 0xFFF9 is Int value for 2149-05-31 -- the last day where we can actually find LastDayOfMonth. This will also be the return value. + return time_zone.toLastDayNumOfMonth(ExtendedDayNum(std::min(d, Int32(0xFFF9)))); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toLastDayNumOfMonth(DayNum(d)); + /// 0xFFF9 is Int value for 2149-05-31 -- the last day where we can actually find LastDayOfMonth. This will also be the return value. + return time_zone.toLastDayNumOfMonth(DayNum(std::min(d, UInt16(0xFFF9)))); } using FactorTransform = ZeroTransform; @@ -209,7 +228,7 @@ struct ToStartOfQuarterImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfQuarter(time_zone.toDayNum(t)); + return t < 0 ? 0 : time_zone.toFirstDayNumOfQuarter(ExtendedDayNum(std::min(Int64(time_zone.toDayNum(t)), Int64(DATE_LUT_MAX_DAY_NUM)))); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -217,7 +236,7 @@ struct ToStartOfQuarterImpl } static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfQuarter(ExtendedDayNum(d)); + return d < 0 ? 
0 : time_zone.toFirstDayNumOfQuarter(ExtendedDayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM)))); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -233,7 +252,7 @@ struct ToStartOfYearImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfYear(time_zone.toDayNum(t)); + return t < 0 ? 0 : time_zone.toFirstDayNumOfYear(ExtendedDayNum(std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM)))); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -241,7 +260,7 @@ struct ToStartOfYearImpl } static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfYear(ExtendedDayNum(d)); + return d < 0 ? 0 : time_zone.toFirstDayNumOfYear(ExtendedDayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM)))); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -283,7 +302,10 @@ struct ToStartOfMinuteImpl static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { - return time_zone.toStartOfMinute(t.whole); + if (t.whole < 0 || (t.whole >= 0 && t.fractional < 0)) + return 0; + + return time_zone.toStartOfMinute(std::min(t.whole, Int64(0xffffffff))); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -574,7 +596,10 @@ struct ToStartOfHourImpl static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { - return time_zone.toStartOfHour(t.whole); + if (t.whole < 0 || (t.whole >= 0 && t.fractional < 0)) + return 0; + + return time_zone.toStartOfHour(std::min(t.whole, Int64(0xffffffff))); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) diff --git a/src/Functions/FunctionBase58Conversion.h b/src/Functions/FunctionBase58Conversion.h index 82e2a2caac0..bc166f2c9f5 100644 --- a/src/Functions/FunctionBase58Conversion.h +++ b/src/Functions/FunctionBase58Conversion.h @@ -48,7 +48,7 @@ struct Base58Encode for (size_t row = 0; row < input_rows_count; ++row) { size_t srclen = src_offsets[row] - src_offset_prev; - auto encoded_size = encodeBase58(src, dst_pos); + auto encoded_size = encodeBase58(src, srclen, dst_pos); src += srclen; dst_pos += encoded_size; @@ -90,7 +90,7 @@ struct Base58Decode { size_t srclen = src_offsets[row] - src_offset_prev; - auto decoded_size = decodeBase58(src, dst_pos); + auto decoded_size = decodeBase58(src, srclen, dst_pos); if (!decoded_size) throw Exception("Invalid Base58 value, cannot be decoded", ErrorCodes::BAD_ARGUMENTS); diff --git a/src/Functions/FunctionChar.cpp b/src/Functions/FunctionChar.cpp index c022fda04c8..9a5a7a2689f 100644 --- a/src/Functions/FunctionChar.cpp +++ b/src/Functions/FunctionChar.cpp @@ -115,7 +115,7 @@ private: REGISTER_FUNCTION(Char) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/FunctionCustomWeekToSomething.h b/src/Functions/FunctionCustomWeekToSomething.h index 6ed751fd889..8a0f474a7e8 100644 --- a/src/Functions/FunctionCustomWeekToSomething.h +++ b/src/Functions/FunctionCustomWeekToSomething.h @@ -41,23 +41,20 @@ public: if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type)) throw Exception( "Illegal type " + arguments[0].type->getName() + " of argument of function " + getName() - + ". Should be a date or a date with time", + + ". 
Must be Date, Date32, DateTime or DateTime64.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } else if (arguments.size() == 2) { if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type)) throw Exception( - "Illegal type " + arguments[0].type->getName() + " of argument of function " + getName() - + ". Should be a date or a date with time", + "Illegal type " + arguments[0].type->getName() + " of 1st argument of function " + getName() + + ". Must be Date, Date32, DateTime or DateTime64.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); if (!isUInt8(arguments[1].type)) throw Exception( - "Function " + getName() - + " supports 1 or 2 or 3 arguments. The 1st argument " - "must be of type Date or DateTime. The 2nd argument (optional) must be " - "a constant UInt8 with week mode. The 3rd argument (optional) must be " - "a constant string with timezone name", + "Illegal type of 2nd (optional) argument of function " + getName() + + ". Must be constant UInt8 (week mode).", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } else if (arguments.size() == 3) @@ -65,33 +62,28 @@ public: if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type)) throw Exception( "Illegal type " + arguments[0].type->getName() + " of argument of function " + getName() - + ". Should be a date or a date with time", + + ". Must be Date, Date32, DateTime or DateTime64", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); if (!isUInt8(arguments[1].type)) throw Exception( - "Function " + getName() - + " supports 1 or 2 or 3 arguments. The 1st argument " - "must be of type Date or DateTime. The 2nd argument (optional) must be " - "a constant UInt8 with week mode. The 3rd argument (optional) must be " - "a constant string with timezone name", + "Illegal type of 2nd (optional) argument of function " + getName() + + ". Must be constant UInt8 (week mode).", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); if (!isString(arguments[2].type)) throw Exception( - "Function " + getName() - + " supports 1 or 2 or 3 arguments. The 1st argument " - "must be of type Date or DateTime. The 2nd argument (optional) must be " - "a constant UInt8 with week mode. The 3rd argument (optional) must be " - "a constant string with timezone name", + "Illegal type of 3rd (optional) argument of function " + getName() + + ". 
Must be constant string (timezone name).", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - if (isDate(arguments[0].type) && std::is_same_v) + if ((isDate(arguments[0].type) || isDate32(arguments[0].type)) + && (std::is_same_v || std::is_same_v)) throw Exception( - "The timezone argument of function " + getName() + " is allowed only when the 1st argument has the type DateTime", + "The timezone argument of function " + getName() + " is allowed only when the 1st argument is DateTime or DateTime64.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } else throw Exception( "Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size()) - + ", should be 1 or 2 or 3", + + ", expected 1, 2 or 3.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); return std::make_shared(); diff --git a/src/Functions/FunctionFQDN.cpp b/src/Functions/FunctionFQDN.cpp index c4ac409ca04..b054ff8e1d7 100644 --- a/src/Functions/FunctionFQDN.cpp +++ b/src/Functions/FunctionFQDN.cpp @@ -46,7 +46,7 @@ public: REGISTER_FUNCTION(FQDN) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); factory.registerFunction("fullHostName"); } diff --git a/src/Functions/FunctionFactory.cpp b/src/Functions/FunctionFactory.cpp index 664c18a9aaf..f36a0bc69ab 100644 --- a/src/Functions/FunctionFactory.cpp +++ b/src/Functions/FunctionFactory.cpp @@ -28,10 +28,11 @@ const String & getFunctionCanonicalNameIfAny(const String & name) void FunctionFactory::registerFunction( const std::string & name, - Value creator, + FunctionCreator creator, + Documentation doc, CaseSensitiveness case_sensitiveness) { - if (!functions.emplace(name, creator).second) + if (!functions.emplace(name, FunctionFactoryData{creator, doc}).second) throw Exception("FunctionFactory: the function name '" + name + "' is not unique", ErrorCodes::LOGICAL_ERROR); @@ -42,7 +43,7 @@ void FunctionFactory::registerFunction( if (case_sensitiveness == CaseInsensitive) { - if (!case_insensitive_functions.emplace(function_name_lowercase, creator).second) + if (!case_insensitive_functions.emplace(function_name_lowercase, FunctionFactoryData{creator, doc}).second) throw Exception("FunctionFactory: the case insensitive function name '" + name + "' is not unique", ErrorCodes::LOGICAL_ERROR); case_insensitive_name_mapping[function_name_lowercase] = name; @@ -105,13 +106,13 @@ FunctionOverloadResolverPtr FunctionFactory::tryGetImpl( auto it = functions.find(name); if (functions.end() != it) - res = it->second(context); + res = it->second.first(context); else { name = Poco::toLower(name); it = case_insensitive_functions.find(name); if (case_insensitive_functions.end() != it) - res = it->second(context); + res = it->second.first(context); } if (!res) @@ -141,4 +142,13 @@ FunctionFactory & FunctionFactory::instance() return ret; } +Documentation FunctionFactory::getDocumentation(const std::string & name) const +{ + auto it = functions.find(name); + if (it == functions.end()) + throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "Unknown function {}", name); + + return it->second.second; +} + } diff --git a/src/Functions/FunctionFactory.h b/src/Functions/FunctionFactory.h index 6758592558c..b6a2adcb424 100644 --- a/src/Functions/FunctionFactory.h +++ b/src/Functions/FunctionFactory.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -15,30 +16,32 @@ namespace DB { +using FunctionCreator = std::function; +using FunctionFactoryData = std::pair; + /** Creates function by name. 
* Function could use for initialization (take ownership of shared_ptr, for example) * some dictionaries from Context. */ -class FunctionFactory : private boost::noncopyable, - public IFactoryWithAliases> +class FunctionFactory : private boost::noncopyable, public IFactoryWithAliases { public: static FunctionFactory & instance(); template - void registerFunction(CaseSensitiveness case_sensitiveness = CaseSensitive) + void registerFunction(Documentation doc = {}, CaseSensitiveness case_sensitiveness = CaseSensitive) { - registerFunction(Function::name, case_sensitiveness); + registerFunction(Function::name, std::move(doc), case_sensitiveness); } template - void registerFunction(const std::string & name, CaseSensitiveness case_sensitiveness = CaseSensitive) + void registerFunction(const std::string & name, Documentation doc = {}, CaseSensitiveness case_sensitiveness = CaseSensitive) { if constexpr (std::is_base_of_v) - registerFunction(name, &adaptFunctionToOverloadResolver, case_sensitiveness); + registerFunction(name, &adaptFunctionToOverloadResolver, std::move(doc), case_sensitiveness); else - registerFunction(name, &Function::create, case_sensitiveness); + registerFunction(name, &Function::create, std::move(doc), case_sensitiveness); } /// This function is used by YQL - innovative transactional DBMS that depends on ClickHouse by source code. @@ -60,9 +63,12 @@ public: /// No locking, you must register all functions before usage of get. void registerFunction( const std::string & name, - Value creator, + FunctionCreator creator, + Documentation doc = {}, CaseSensitiveness case_sensitiveness = CaseSensitive); + Documentation getDocumentation(const std::string & name) const; + private: using Functions = std::unordered_map; diff --git a/src/Functions/FunctionsBinaryRepresentation.cpp b/src/Functions/FunctionsBinaryRepresentation.cpp index d53963ace8a..775696ded8a 100644 --- a/src/Functions/FunctionsBinaryRepresentation.cpp +++ b/src/Functions/FunctionsBinaryRepresentation.cpp @@ -623,10 +623,10 @@ public: REGISTER_FUNCTION(BinaryRepr) { - factory.registerFunction>(FunctionFactory::CaseInsensitive); - factory.registerFunction>(FunctionFactory::CaseInsensitive); - factory.registerFunction>(FunctionFactory::CaseInsensitive); - factory.registerFunction>(FunctionFactory::CaseInsensitive); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index dc0235f810f..d607af54fcd 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -28,8 +28,12 @@ REGISTER_FUNCTION(Conversion) factory.registerFunction(); factory.registerFunction(); - /// MySQL compatibility alias. - factory.registerFunction("DATE", FunctionFactory::CaseInsensitive); + + /// MySQL compatibility alias. Cannot be registered as alias, + /// because we don't want it to be normalized to toDate in queries, + /// otherwise CREATE DICTIONARY query breaks. 
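/// Editor's note (hedged illustration, not part of the patch): a registerAlias()'d name is
/// normalized to its canonical spelling when queries are re-formatted, so an attribute declared as
///     CREATE DICTIONARY d (id UInt64, day DATE) ...
/// would be stored back as `day toDate`, which is no longer parseable DDL. Registering DATE
/// below as an independent case-insensitive function avoids that normalization.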
+ factory.registerFunction("DATE", {}, FunctionFactory::CaseInsensitive); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index d888a181d7a..96c28b21ef0 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -191,29 +191,27 @@ struct ConvertImpl vec_null_map_to = &col_null_map_to->getData(); } - if constexpr (std::is_same_v) + bool result_is_bool = isBool(result_type); + for (size_t i = 0; i < input_rows_count; ++i) { - if (isBool(result_type)) + if constexpr (std::is_same_v) { - for (size_t i = 0; i < input_rows_count; ++i) + if (result_is_bool) { vec_to[i] = vec_from[i] != FromFieldType(0); + continue; } - goto done; } - } - if constexpr (std::is_same_v != std::is_same_v) - { - throw Exception("Conversion between numeric types and UUID is not supported. Probably the passed UUID is unquoted", ErrorCodes::NOT_IMPLEMENTED); - } - else - { - if constexpr (IsDataTypeDecimal || IsDataTypeDecimal) + if constexpr (std::is_same_v != std::is_same_v) { - if constexpr (std::is_same_v) + throw Exception("Conversion between numeric types and UUID is not supported. Probably the passed UUID is unquoted", ErrorCodes::NOT_IMPLEMENTED); + } + else + { + if constexpr (IsDataTypeDecimal || IsDataTypeDecimal) { - for (size_t i = 0; i < input_rows_count; ++i) + if constexpr (std::is_same_v) { ToFieldType result; bool convert_result = false; @@ -233,10 +231,7 @@ struct ConvertImpl (*vec_null_map_to)[i] = true; } } - } - else - { - for (size_t i = 0; i < input_rows_count; ++i) + else { if constexpr (IsDataTypeDecimal && IsDataTypeDecimal) vec_to[i] = convertDecimals(vec_from[i], col_from->getScale(), col_to->getScale()); @@ -248,13 +243,10 @@ struct ConvertImpl throw Exception("Unsupported data type in conversion function", ErrorCodes::CANNOT_CONVERT_TYPE); } } - } - else - { - /// If From Data is Nan or Inf and we convert to integer type, throw exception - if constexpr (std::is_floating_point_v && !std::is_floating_point_v) + else { - for (size_t i = 0; i < input_rows_count; ++i) + /// If From Data is Nan or Inf and we convert to integer type, throw exception + if constexpr (std::is_floating_point_v && !std::is_floating_point_v) { if (!isFinite(vec_from[i])) { @@ -262,46 +254,15 @@ struct ConvertImpl { vec_to[i] = 0; (*vec_null_map_to)[i] = true; + continue; } else throw Exception("Unexpected inf or nan to integer conversion", ErrorCodes::CANNOT_CONVERT_TYPE); } - else - { - if constexpr (std::is_same_v - || std::is_same_v) - { - bool convert_result = accurate::convertNumeric(vec_from[i], vec_to[i]); - - if (!convert_result) - { - if (std::is_same_v) - { - vec_to[i] = 0; - (*vec_null_map_to)[i] = true; - } - else - { - throw Exception( - "Value in column " + named_from.column->getName() + " cannot be safely converted into type " - + result_type->getName(), - ErrorCodes::CANNOT_CONVERT_TYPE); - } - } - } - else - { - vec_to[i] = static_cast(vec_from[i]); - } - } } - goto done; - } - if constexpr (std::is_same_v - || std::is_same_v) - { - for (size_t i = 0; i < input_rows_count; ++i) + if constexpr (std::is_same_v + || std::is_same_v) { bool convert_result = accurate::convertNumeric(vec_from[i], vec_to[i]); @@ -321,38 +282,14 @@ struct ConvertImpl } } } - } - else - { - if constexpr (std::is_same_v && std::is_same_v) - { - /// Turns out that when ClickHouse is compiled with AVX1 or AVX2 instructions, Clang's autovectorizer produces - /// code for 
UInt64-to-Float32 conversion which is only ~50% as fast as scalar code. Interestingly, scalar code - /// is equally fast than code compiled for SSE4.2, so we might as well disable vectorization. This situation - /// may change with AVX512 which has a dedicated instruction for that usecase (_mm512_cvtepi64_ps). -#if defined(__x86_64__) -# ifdef __clang__ -# pragma clang loop vectorize(disable) interleave(disable) -# endif -#endif - for (size_t i = 0; i < input_rows_count; ++i) - { - vec_to[i] = static_cast(vec_from[i]); - } - } else { - for (size_t i = 0; i < input_rows_count; ++i) - { - vec_to[i] = static_cast(vec_from[i]); - } + vec_to[i] = static_cast(vec_from[i]); } } } } -done: - if constexpr (std::is_same_v) return ColumnNullable::create(std::move(col_to), std::move(col_null_map_to)); else @@ -364,6 +301,11 @@ done: } }; +/** Conversion of Date32 to Date: check bounds. + */ +template struct ConvertImpl + : DateTimeTransformImpl {}; + /** Conversion of DateTime to Date: throw off time component. */ template struct ConvertImpl @@ -382,12 +324,17 @@ struct ToDateTimeImpl { static inline UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.fromDayNum(DayNum(d)); + auto date_time = time_zone.fromDayNum(ExtendedDayNum(d)); + return date_time <= 0xffffffff ? UInt32(date_time) : UInt32(0xffffffff); } - static inline Int64 execute(Int32 d, const DateLUTImpl & time_zone) + static inline UInt32 execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.fromDayNum(ExtendedDayNum(d)); + if (d < 0) + return 0; + + auto date_time = time_zone.fromDayNum(ExtendedDayNum(d)); + return date_time <= 0xffffffff ? date_time : 0xffffffff; } static inline UInt32 execute(UInt32 dt, const DateLUTImpl & /*time_zone*/) @@ -395,10 +342,21 @@ struct ToDateTimeImpl return dt; } - // TODO: return UInt32 ??? - static inline Int64 execute(Int64 dt64, const DateLUTImpl & /*time_zone*/) + static inline UInt32 execute(Int64 d, const DateLUTImpl & time_zone) { - return dt64; + if (d < 0) + return 0; + + auto date_time = time_zone.toDate(d); + return date_time <= 0xffffffff ? date_time : 0xffffffff; } + + static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & /*time_zone*/) + { + if (t.whole < 0 || (t.whole >= 0 && t.fractional < 0)) + return 0; + + return std::min(t.whole, Int64(0xFFFFFFFF)); } }; @@ -418,9 +376,12 @@ struct ToDateTransform32Or64 { static inline NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { // since converting to Date, no need in values outside of default LUT range. + if (from < 0) + return 0; + return (from < DATE_LUT_MAX_DAY_NUM) ? from - : time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF))); + : std::min(Int32(time_zone.toDayNum(from)), Int32(DATE_LUT_MAX_DAY_NUM)); } }; @@ -435,9 +396,14 @@ struct ToDateTransform32Or64Signed { static inline NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { /// The function should be monotonic (better for query optimizations), so we saturate instead of overflow. if (from < 0) return 0; + + auto day_num = time_zone.toDayNum(ExtendedDayNum(from)); + return day_num < DATE_LUT_MAX_DAY_NUM ? day_num : DATE_LUT_MAX_DAY_NUM; + return (from < DATE_LUT_MAX_DAY_NUM) ? from - : time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF))); + : std::min(Int32(time_zone.toDayNum(from)), Int32(0xFFFFFFFF)); + } }; @@ -468,7 +434,7 @@ struct ToDate32Transform32Or64 { return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) ?
from - : time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF))); + : std::min(Int32(time_zone.toDayNum(from)), Int32(DATE_LUT_MAX_EXTEND_DAY_NUM)); } }; @@ -484,7 +450,7 @@ struct ToDate32Transform32Or64Signed return daynum_min_offset; return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) ? from - : time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF))); + : time_zone.toDayNum(std::min(Int64(from), Int64(0xFFFFFFFF))); } }; @@ -510,35 +476,49 @@ struct ToDate32Transform8Or16Signed */ template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; + template struct ConvertImpl : DateTimeTransformImpl> {}; @@ -550,7 +530,7 @@ struct ToDateTimeTransform64 static inline NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { - return std::min(time_t(from), time_t(0xFFFFFFFF)); + return std::min(Int64(from), Int64(0xFFFFFFFF)); } }; @@ -572,11 +552,12 @@ struct ToDateTimeTransform64Signed { static constexpr auto name = "toDateTime"; - static inline NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) + static inline NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & /* time_zone */) { if (from < 0) return 0; - return std::min(time_t(from), time_t(0xFFFFFFFF)); + + return std::min(Int64(from), Int64(0xFFFFFFFF)); } }; @@ -697,8 +678,6 @@ struct FromDateTime64Transform } }; -/** Conversion of DateTime64 to Date or DateTime: discards fractional part. - */ template struct ConvertImpl : DateTimeTransformImpl> {}; template struct ConvertImpl @@ -722,7 +701,7 @@ struct ToDateTime64Transform inline DateTime64::NativeType execute(Int32 d, const DateLUTImpl & time_zone) const { - const auto dt = ToDateTimeImpl::execute(d, time_zone); + const auto dt = time_zone.fromDayNum(ExtendedDayNum(d)); return DecimalUtils::decimalFromComponentsWithMultiplier(dt, 0, scale_multiplier); } @@ -1080,9 +1059,7 @@ inline bool tryParseImpl(DataTypeDate32::FieldType & x, ReadBuff { ExtendedDayNum tmp(0); if (!tryReadDateText(tmp, rb)) - { return false; - } x = tmp; return true; } @@ -1165,9 +1142,27 @@ struct ConvertThroughParsing if (in.eof()) return true; - /// Special case, that allows to parse string with DateTime as Date. - if (std::is_same_v && (in.buffer().size()) == strlen("YYYY-MM-DD hh:mm:ss")) - return true; + /// Special case, that allows to parse string with DateTime or DateTime64 as Date or Date32. 
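/// Editor's sketch (not part of the patch; the helper name is hypothetical) of the tail shapes
/// the special case below accepts once the 10-character date has been read:
///     "2022-08-18 12:34:56"     -> accepted: exactly " hh:mm:ss" remains
///     "2022-08-18 12:34:56.123" -> accepted: '.' right after the seconds, then digits to EOF
///     "2022-08-18 12:34:56.1x"  -> rejected: non-digit inside the fractional part
/// static bool acceptDateTimeTail(const char * pos, const char * end)
/// {
///     if (pos == end || (*pos != ' ' && *pos != 'T'))
///         return false;
///     if (end - pos == 9)                    /// " hh:mm:ss" and nothing else
///         return true;
///     if (end - pos >= 11 && pos[9] == '.')  /// " hh:mm:ss." plus at least one digit
///     {
///         for (const char * p = pos + 10; p != end; ++p)
///             if (!isNumericASCII(*p))
///                 return false;
///         return true;
///     }
///     return false;
/// }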
+ if constexpr (std::is_same_v || std::is_same_v) + { + if (!in.eof() && (*in.position() == ' ' || *in.position() == 'T')) + { + if (in.buffer().size() == strlen("YYYY-MM-DD hh:mm:ss")) + return true; + + if (in.buffer().size() >= strlen("YYYY-MM-DD hh:mm:ss.x") + && in.buffer().begin()[19] == '.') + { + in.position() = in.buffer().begin() + 20; + + while (!in.eof() && isNumericASCII(*in.position())) + ++in.position(); + + if (in.eof()) + return true; + } + } + } return false; } @@ -1189,9 +1184,7 @@ struct ConvertThroughParsing if (const auto dt_col = checkAndGetDataType(result_type.get())) local_time_zone = &dt_col->getTimeZone(); else - { local_time_zone = &extractTimeZoneFromFunctionArguments(arguments, 1, 0); - } if constexpr (parsing_mode == ConvertFromStringParsingMode::BestEffort || parsing_mode == ConvertFromStringParsingMode::BestEffortUS) utc_time_zone = &DateLUT::instance("UTC"); @@ -1305,8 +1298,10 @@ struct ConvertThroughParsing vec_to[i] = value; } else if constexpr (IsDataTypeDecimal) + { SerializationDecimal::readText( vec_to[i], read_buffer, ToDataType::maxPrecision(), col_to->getScale()); + } else { parseImpl(vec_to[i], read_buffer, local_time_zone); @@ -1359,8 +1354,10 @@ struct ConvertThroughParsing vec_to[i] = value; } else if constexpr (IsDataTypeDecimal) + { parsed = SerializationDecimal::tryReadText( vec_to[i], read_buffer, ToDataType::maxPrecision(), col_to->getScale()); + } else parsed = tryParseImpl(vec_to[i], read_buffer, local_time_zone); } @@ -1851,7 +1848,7 @@ private: { /// Account for optional timezone argument. if (arguments.size() != 2 && arguments.size() != 3) - throw Exception{"Function " + getName() + " expects 2 or 3 arguments for DataTypeDateTime64.", + throw Exception{"Function " + getName() + " expects 2 or 3 arguments for DateTime64.", ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION}; } else if (arguments.size() != 2) diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index 6beb8155965..c90e8cad324 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -813,8 +813,8 @@ struct JSONExtractTree auto from_col = dictionary_type->createColumn(); if (impl->insertResultToColumn(*from_col, element)) { - StringRef value = from_col->getDataAt(0); - assert_cast(dest).insertData(value.data, value.size); + std::string_view value = from_col->getDataAt(0).toView(); + assert_cast(dest).insertData(value.data(), value.size()); return true; } return false; diff --git a/src/Functions/FunctionsLogical.cpp b/src/Functions/FunctionsLogical.cpp index 05ec1e54c94..63a2818a0c0 100644 --- a/src/Functions/FunctionsLogical.cpp +++ b/src/Functions/FunctionsLogical.cpp @@ -27,7 +27,7 @@ REGISTER_FUNCTION(Logical) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); - factory.registerFunction(FunctionFactory::CaseInsensitive); /// Operator NOT(x) can be parsed as a function. + factory.registerFunction({}, FunctionFactory::CaseInsensitive); /// Operator NOT(x) can be parsed as a function. 
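/// Editor's note: the bare `{}` being threaded through these registerFunction() calls is an
/// empty Documentation value -- the new first parameter added in FunctionFactory.h above.
/// Spelled out (illustrative only), the call above is equivalent to:
///     factory.registerFunction(Documentation{}, FunctionFactory::CaseInsensitive);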
} namespace ErrorCodes diff --git a/src/Functions/FunctionsRound.cpp b/src/Functions/FunctionsRound.cpp index a08ebbaf038..02fe1d659de 100644 --- a/src/Functions/FunctionsRound.cpp +++ b/src/Functions/FunctionsRound.cpp @@ -7,11 +7,11 @@ namespace DB REGISTER_FUNCTION(Round) { - factory.registerFunction("round", FunctionFactory::CaseInsensitive); - factory.registerFunction("roundBankers", FunctionFactory::CaseSensitive); - factory.registerFunction("floor", FunctionFactory::CaseInsensitive); - factory.registerFunction("ceil", FunctionFactory::CaseInsensitive); - factory.registerFunction("trunc", FunctionFactory::CaseInsensitive); + factory.registerFunction("round", {}, FunctionFactory::CaseInsensitive); + factory.registerFunction("roundBankers", {}, FunctionFactory::CaseSensitive); + factory.registerFunction("floor", {}, FunctionFactory::CaseInsensitive); + factory.registerFunction("ceil", {}, FunctionFactory::CaseInsensitive); + factory.registerFunction("trunc", {}, FunctionFactory::CaseInsensitive); factory.registerFunction(); /// Compatibility aliases. diff --git a/src/Functions/URL/topLevelDomain.cpp b/src/Functions/URL/topLevelDomain.cpp index 23ba5201c18..9937618cae9 100644 --- a/src/Functions/URL/topLevelDomain.cpp +++ b/src/Functions/URL/topLevelDomain.cpp @@ -11,19 +11,19 @@ struct ExtractTopLevelDomain static void execute(Pos data, size_t size, Pos & res_data, size_t & res_size) { - StringRef host = StringRef(getURLHost(data, size)); + std::string_view host = getURLHost(data, size); res_data = data; res_size = 0; - if (host.size != 0) + if (!host.empty()) { - if (host.data[host.size - 1] == '.') - host.size -= 1; + if (host[host.size() - 1] == '.') + host.remove_suffix(1); - const auto * host_end = host.data + host.size; + const auto * host_end = host.data() + host.size(); - Pos last_dot = find_last_symbols_or_null<'.'>(host.data, host_end); + Pos last_dot = find_last_symbols_or_null<'.'>(host.data(), host_end); if (!last_dot) return; diff --git a/src/Functions/abs.cpp b/src/Functions/abs.cpp index 67aa5ec6e90..ae2a2412a4b 100644 --- a/src/Functions/abs.cpp +++ b/src/Functions/abs.cpp @@ -52,7 +52,7 @@ template <> struct FunctionUnaryArithmeticMonotonicity REGISTER_FUNCTION(Abs) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/acos.cpp b/src/Functions/acos.cpp index 1fbd636f14e..bc300ee77fb 100644 --- a/src/Functions/acos.cpp +++ b/src/Functions/acos.cpp @@ -14,7 +14,7 @@ using FunctionAcos = FunctionMathUnary>; REGISTER_FUNCTION(Acos) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/array/FunctionArrayMapped.h b/src/Functions/array/FunctionArrayMapped.h index c4ac89df78e..6d500cc15c4 100644 --- a/src/Functions/array/FunctionArrayMapped.h +++ b/src/Functions/array/FunctionArrayMapped.h @@ -85,6 +85,9 @@ class FunctionArrayMapped : public IFunction { public: static constexpr auto name = Name::name; + static constexpr bool is_argument_type_map = std::is_same_v; + static constexpr bool is_argument_type_array = std::is_same_v; + static constexpr auto argument_type_name = is_argument_type_map ? 
"Map" : "Array"; static FunctionPtr create(ContextPtr) { return std::make_shared(); } String getName() const override @@ -112,20 +115,25 @@ public: throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} needs one argument with data", getName()); - size_t nested_types_count = std::is_same_v ? (arguments.size() - 1) * 2 : (arguments.size() - 1); + size_t nested_types_count = is_argument_type_map ? (arguments.size() - 1) * 2 : (arguments.size() - 1); DataTypes nested_types(nested_types_count); for (size_t i = 0; i < arguments.size() - 1; ++i) { const auto * array_type = checkAndGetDataType(&*arguments[i + 1]); if (!array_type) - throw Exception("Argument " + toString(i + 2) + " of function " + getName() + " must be array. Found " - + arguments[i + 1]->getName() + " instead.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - if constexpr (std::is_same_v) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Argument {} of function {} must be {}. Found {} instead", + toString(i + 2), + getName(), + argument_type_name, + arguments[i + 1]->getName()); + if constexpr (is_argument_type_map) { nested_types[2 * i] = recursiveRemoveLowCardinality(array_type->getKeyType()); nested_types[2 * i + 1] = recursiveRemoveLowCardinality(array_type->getValueType()); } - else if constexpr (std::is_same_v) + else if constexpr (is_argument_type_array) { nested_types[i] = recursiveRemoveLowCardinality(array_type->getNestedType()); } @@ -149,7 +157,7 @@ public: "Function {} needs at least {} argument, passed {}", getName(), min_args, arguments.size()); - if ((arguments.size() == 1) && std::is_same_v) + if ((arguments.size() == 1) && is_argument_type_array) { const auto * data_type = checkAndGetDataType(arguments[0].type.get()); @@ -163,7 +171,7 @@ public: throw Exception("The only argument for function " + getName() + " must be array of UInt8. 
Found " + arguments[0].type->getName() + " instead", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - if constexpr (std::is_same_v) + if constexpr (is_argument_type_array) return Impl::getReturnType(nested_type, nested_type); else throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable code reached"); @@ -193,10 +201,7 @@ public: throw Exception("Expression for function " + getName() + " must return UInt8 or Nullable(UInt8), found " + return_type->getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - static_assert( - std::is_same_v || - std::is_same_v, - "unsupported type"); + static_assert(is_argument_type_map || is_argument_type_array, "unsupported type"); if (arguments.size() < 2) { @@ -208,10 +213,10 @@ public: if (!first_array_type) throw DB::Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Unsupported type {}", arguments[1].type->getName()); - if constexpr (std::is_same_v) + if constexpr (is_argument_type_array) return Impl::getReturnType(return_type, first_array_type->getNestedType()); - if constexpr (std::is_same_v) + if constexpr (is_argument_type_map) return Impl::getReturnType(return_type, first_array_type->getKeyValueTypes()); throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable code reached"); @@ -229,7 +234,11 @@ public: { const ColumnConst * column_const_array = checkAndGetColumnConst(column_array_ptr.get()); if (!column_const_array) - throw Exception("Expected array column, found " + column_array_ptr->getName(), ErrorCodes::ILLEGAL_COLUMN); + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Expected {} column, found {}", + argument_type_name, + column_array_ptr->getName()); column_array_ptr = column_const_array->convertToFullColumn(); column_array = assert_cast(column_array_ptr.get()); } @@ -279,13 +288,15 @@ public: { const ColumnConst * column_const_array = checkAndGetColumnConst(column_array_ptr.get()); if (!column_const_array) - throw Exception("Expected array column, found " + column_array_ptr->getName(), ErrorCodes::ILLEGAL_COLUMN); + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, "Expected {} column, found {}", argument_type_name, column_array_ptr->getName()); column_array_ptr = recursiveRemoveLowCardinality(column_const_array->convertToFullColumn()); column_array = checkAndGetColumn(column_array_ptr.get()); } if (!array_type) - throw Exception("Expected array type, found " + array_type_ptr->getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Expected {} type, found {}", argument_type_name, array_type_ptr->getName()); if (!offsets_column) { @@ -296,7 +307,11 @@ public: /// The first condition is optimization: do not compare data if the pointers are equal. 
if (getOffsetsPtr(*column_array) != offsets_column && getOffsets(*column_array) != typeid_cast(*offsets_column).getData()) - throw Exception("Arrays passed to " + getName() + " must have equal size", ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH); + throw Exception( + ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH, + "{}s passed to {} must have equal size", + argument_type_name, + getName()); } if (i == 1) { column_first_array = column_array; } - if constexpr (std::is_same_v) + if constexpr (is_argument_type_map) { arrays.emplace_back(ColumnWithTypeAndName( column_array->getNestedData().getColumnPtr(0), recursiveRemoveLowCardinality(array_type->getKeyType()), array_with_type_and_name.name+".key")); diff --git a/src/Functions/array/arrayElement.cpp b/src/Functions/array/arrayElement.cpp index a7b27bae268..7ffe71a7e5a 100644 --- a/src/Functions/array/arrayElement.cpp +++ b/src/Functions/array/arrayElement.cpp @@ -875,7 +875,8 @@ bool FunctionArrayElement::matchKeyToIndexStringConst( return castColumnString(&data, [&](const auto & data_column) { using DataColumn = std::decay_t; - + if (index.getType() != Field::Types::String) + return false; MatcherStringConst matcher{data_column, get(index)}; executeMatchKeyToIndex(offsets, matched_idxs, matcher); return true; diff --git a/src/Functions/array/length.cpp b/src/Functions/array/length.cpp index dca38474ab0..7a64c24fd6b 100644 --- a/src/Functions/array/length.cpp +++ b/src/Functions/array/length.cpp @@ -55,7 +55,42 @@ using FunctionLength = FunctionStringOrArrayToT(FunctionFactory::CaseInsensitive); + factory.registerFunction( + { + R"( +Calculates the length of the string or array. + +For String or FixedString argument: calculates the number of bytes in the string. +[example:string1] + +For Array argument: calculates the number of elements in the array. +[example:arr1] + +If applied to a FixedString argument, the function is a constant expression: +[example:constexpr] + +Please note that the number of bytes in a string is not the same as the number of Unicode "code points" +and it is not the same as the number of Unicode "grapheme clusters" (what we usually call "characters") +and it is not the same as the visible string width. +[example:unicode] + +It is ok to have ASCII NUL bytes in strings, and they will be counted as well. +[example:nul] +)", + Documentation::Examples{ + {"string1", "SELECT length('Hello, world!')"}, + {"arr1", "SELECT length(['Hello', 'world'])"}, + {"constexpr", "WITH 'hello' || toString(number) AS str\n" + "SELECT str, \n" + " isConstant(length(str)) AS str_length_is_constant, \n" + " isConstant(length(str::FixedString(6))) AS fixed_str_length_is_constant\n" + "FROM numbers(3)"}, + {"unicode", "SELECT 'ёлка' AS str1, length(str1), lengthUTF8(str1), normalizeUTF8NFKD(str1) AS str2, length(str2), lengthUTF8(str2)"}, + {"nul", R"(SELECT 'abc\0\0\0' AS str, length(str))"}, + }, + Documentation::Categories{"String", "Array"} + }, + FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/asin.cpp b/src/Functions/asin.cpp index a02175367b0..264d2389974 100644 --- a/src/Functions/asin.cpp +++ b/src/Functions/asin.cpp @@ -14,7 +14,34 @@ using FunctionAsin = FunctionMathUnary>; REGISTER_FUNCTION(Asin) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction( + { + R"( +Calculates the arcsine of the argument. + +Takes an arbitrary numeric type, including floating point and integer numbers as well as big integers and decimals, and returns Float64.
+ +For arguments in the range [-1, 1] it returns values in the range [-pi() / 2, pi() / 2]. + +On this range it is the inverse of the 'sin' function: +[example:inverse] + +It always returns Float64, even if the argument has Float32 type: +[example:float32] + +For arguments outside of this range, it returns nan: +[example:nan] + +Every self-respecting data scientist knows how to apply arcsine to improve ads click-through rate with ClickHouse. +For more details, see [https://en.wikipedia.org/wiki/Inverse_trigonometric_functions]. +)", + Documentation::Examples{ + {"inverse", "SELECT asin(1.0) = pi() / 2, sin(asin(1)), asin(sin(1))"}, + {"float32", "SELECT toTypeName(asin(1.0::Float32))"}, + {"nan", "SELECT asin(1.1), asin(-2), asin(inf), asin(nan)"}}, + Documentation::Categories{"Mathematical", "Trigonometric"} + }, + FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/atan.cpp b/src/Functions/atan.cpp index b735846fea7..32a0f06db8a 100644 --- a/src/Functions/atan.cpp +++ b/src/Functions/atan.cpp @@ -14,7 +14,7 @@ using FunctionAtan = FunctionMathUnary>; REGISTER_FUNCTION(Atan) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/atan2.cpp b/src/Functions/atan2.cpp index c6a9f70286c..7be177f6dfb 100644 --- a/src/Functions/atan2.cpp +++ b/src/Functions/atan2.cpp @@ -15,7 +15,7 @@ namespace REGISTER_FUNCTION(Atan2) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/base64Decode.cpp b/src/Functions/base64Decode.cpp index 8922e1e0095..f6943233d44 100644 --- a/src/Functions/base64Decode.cpp +++ b/src/Functions/base64Decode.cpp @@ -12,7 +12,7 @@ REGISTER_FUNCTION(Base64Decode) factory.registerFunction>(); /// MysQL compatibility alias. - factory.registerFunction>("FROM_BASE64", FunctionFactory::CaseInsensitive); + factory.registerAlias("FROM_BASE64", "base64Decode", FunctionFactory::CaseInsensitive); } } #endif diff --git a/src/Functions/base64Encode.cpp b/src/Functions/base64Encode.cpp index 14523f8b0f3..e895230d44f 100644 --- a/src/Functions/base64Encode.cpp +++ b/src/Functions/base64Encode.cpp @@ -14,7 +14,7 @@ REGISTER_FUNCTION(Base64Encode) factory.registerFunction>(); /// MysQL compatibility alias.
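/// Editor's note on the recurring pattern in this and the following files: compatibility names
/// that used to be registered as full, independent functions become registerAlias() entries
/// pointing at the canonical name. Hedged before/after sketch:
///     factory.registerFunction("TO_BASE64", FunctionFactory::CaseInsensitive);               /// old: a second registration
///     factory.registerAlias("TO_BASE64", "base64Encode", FunctionFactory::CaseInsensitive);  /// new: one canonical function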
- factory.registerFunction>("TO_BASE64", FunctionFactory::CaseInsensitive); + factory.registerAlias("TO_BASE64", "base64Encode", FunctionFactory::CaseInsensitive); } } #endif diff --git a/src/Functions/coalesce.cpp b/src/Functions/coalesce.cpp index aafbcd7d714..befebd1ff52 100644 --- a/src/Functions/coalesce.cpp +++ b/src/Functions/coalesce.cpp @@ -176,7 +176,7 @@ private: REGISTER_FUNCTION(Coalesce) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/concat.cpp b/src/Functions/concat.cpp index 3b02f2c0b78..9f459711aa5 100644 --- a/src/Functions/concat.cpp +++ b/src/Functions/concat.cpp @@ -230,7 +230,7 @@ private: REGISTER_FUNCTION(Concat) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); factory.registerFunction(); } diff --git a/src/Functions/connectionId.cpp b/src/Functions/connectionId.cpp index b9d772e3871..9c53482482b 100644 --- a/src/Functions/connectionId.cpp +++ b/src/Functions/connectionId.cpp @@ -33,7 +33,7 @@ public: REGISTER_FUNCTION(ConnectionId) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); factory.registerAlias("connection_id", "connectionID", FunctionFactory::CaseInsensitive); } diff --git a/src/Functions/cos.cpp b/src/Functions/cos.cpp index e7c9d7759ed..3496373a9d5 100644 --- a/src/Functions/cos.cpp +++ b/src/Functions/cos.cpp @@ -13,7 +13,7 @@ using FunctionCos = FunctionMathUnary>; REGISTER_FUNCTION(Cos) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/countMatches.cpp b/src/Functions/countMatches.cpp index d8948f85d44..a8620080012 100644 --- a/src/Functions/countMatches.cpp +++ b/src/Functions/countMatches.cpp @@ -22,8 +22,8 @@ namespace DB REGISTER_FUNCTION(CountMatches) { - factory.registerFunction>(FunctionFactory::CaseSensitive); - factory.registerFunction>(FunctionFactory::CaseSensitive); + factory.registerFunction>({}, FunctionFactory::CaseSensitive); + factory.registerFunction>({}, FunctionFactory::CaseSensitive); } } diff --git a/src/Functions/countSubstrings.cpp b/src/Functions/countSubstrings.cpp index ba8d150fb41..843b81437f5 100644 --- a/src/Functions/countSubstrings.cpp +++ b/src/Functions/countSubstrings.cpp @@ -19,6 +19,6 @@ using FunctionCountSubstrings = FunctionsStringSearch(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/currentDatabase.cpp b/src/Functions/currentDatabase.cpp index 981b324fb51..b1a3cbf5856 100644 --- a/src/Functions/currentDatabase.cpp +++ b/src/Functions/currentDatabase.cpp @@ -54,7 +54,7 @@ public: REGISTER_FUNCTION(CurrentDatabase) { factory.registerFunction(); - factory.registerFunction("DATABASE", FunctionFactory::CaseInsensitive); + factory.registerAlias("DATABASE", "currentDatabase", FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/dateDiff.cpp b/src/Functions/dateDiff.cpp index 27948a35f57..b8bf3c11698 100644 --- a/src/Functions/dateDiff.cpp +++ b/src/Functions/dateDiff.cpp @@ -263,8 +263,7 @@ private: REGISTER_FUNCTION(DateDiff) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } - diff --git a/src/Functions/dateName.cpp b/src/Functions/dateName.cpp index 
8f551dfd136..3911b1cf838 100644 --- a/src/Functions/dateName.cpp +++ b/src/Functions/dateName.cpp @@ -345,7 +345,7 @@ private: REGISTER_FUNCTION(DateName) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/date_trunc.cpp b/src/Functions/date_trunc.cpp index bb891ac702f..1c2475cf56a 100644 --- a/src/Functions/date_trunc.cpp +++ b/src/Functions/date_trunc.cpp @@ -23,7 +23,7 @@ namespace class FunctionDateTrunc : public IFunction { public: - static constexpr auto name = "date_trunc"; + static constexpr auto name = "dateTrunc"; explicit FunctionDateTrunc(ContextPtr context_) : context(context_) {} @@ -153,12 +153,13 @@ private: } + REGISTER_FUNCTION(DateTrunc) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction(); /// Compatibility alias. - factory.registerAlias("dateTrunc", FunctionDateTrunc::name); + factory.registerAlias("DATE_TRUNC", "dateTrunc", FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/degrees.cpp b/src/Functions/degrees.cpp index 2881f8d2df6..3aa20a77a0d 100644 --- a/src/Functions/degrees.cpp +++ b/src/Functions/degrees.cpp @@ -23,7 +23,7 @@ namespace REGISTER_FUNCTION(Degrees) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/exp.cpp b/src/Functions/exp.cpp index 71037692f15..d352cda7460 100644 --- a/src/Functions/exp.cpp +++ b/src/Functions/exp.cpp @@ -36,7 +36,7 @@ using FunctionExp = FunctionMathUnary>; REGISTER_FUNCTION(Exp) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/extractAllGroups.h b/src/Functions/extractAllGroups.h index 1a40afbbe8e..06b16181c94 100644 --- a/src/Functions/extractAllGroups.h +++ b/src/Functions/extractAllGroups.h @@ -129,11 +129,11 @@ public: root_offsets_data.resize(input_rows_count); for (size_t i = 0; i < input_rows_count; ++i) { - StringRef current_row = column_haystack->getDataAt(i); + std::string_view current_row = column_haystack->getDataAt(i).toView(); // Extract all non-intersecting matches from haystack except group #0. 
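// Editor's note: same StringRef -> std::string_view migration as in FunctionsJSON.cpp and
// topLevelDomain.cpp above. Minimal sketch of the pattern (illustrative):
//     StringRef ref = column->getDataAt(i);                    // field access: ref.data, ref.size
//     std::string_view view = column->getDataAt(i).toView();   // accessors: view.data(), view.size()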
- const auto * pos = current_row.data; - const auto * end = pos + current_row.size; + const auto * pos = current_row.data(); + const auto * end = pos + current_row.size(); while (pos < end && regexp->Match({pos, static_cast(end - pos)}, 0, end - pos, regexp->UNANCHORED, matched_groups.data(), matched_groups.size())) diff --git a/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp index 37f1f7e83f8..328e252b67e 100644 --- a/src/Functions/formatDateTime.cpp +++ b/src/Functions/formatDateTime.cpp @@ -722,19 +722,19 @@ struct NameFormatDateTime struct NameFromUnixTime { - static constexpr auto name = "FROM_UNIXTIME"; + static constexpr auto name = "fromUnixTimestamp"; }; using FunctionFormatDateTime = FunctionFormatDateTimeImpl; -using FunctionFROM_UNIXTIME = FunctionFormatDateTimeImpl; +using FunctionFromUnixTimestamp = FunctionFormatDateTimeImpl; } REGISTER_FUNCTION(FormatDateTime) { factory.registerFunction(); - factory.registerFunction(); - factory.registerAlias("fromUnixTimestamp", "FROM_UNIXTIME"); + factory.registerFunction(); + factory.registerAlias("FROM_UNIXTIME", "fromUnixTimestamp"); } } diff --git a/src/Functions/greatest.cpp b/src/Functions/greatest.cpp index cac02eea1be..93fd7e24853 100644 --- a/src/Functions/greatest.cpp +++ b/src/Functions/greatest.cpp @@ -65,7 +65,7 @@ using FunctionGreatest = FunctionBinaryArithmetic; REGISTER_FUNCTION(Greatest) { - factory.registerFunction>(FunctionFactory::CaseInsensitive); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/hypot.cpp b/src/Functions/hypot.cpp index 4963e0262e4..465471cb09b 100644 --- a/src/Functions/hypot.cpp +++ b/src/Functions/hypot.cpp @@ -15,7 +15,7 @@ namespace REGISTER_FUNCTION(Hypot) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/if.cpp b/src/Functions/if.cpp index d7fefb1ad0e..86707fc62d6 100644 --- a/src/Functions/if.cpp +++ b/src/Functions/if.cpp @@ -1122,7 +1122,7 @@ public: REGISTER_FUNCTION(If) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/ifNull.cpp b/src/Functions/ifNull.cpp index a586a695752..ef301a9662e 100644 --- a/src/Functions/ifNull.cpp +++ b/src/Functions/ifNull.cpp @@ -91,7 +91,7 @@ private: REGISTER_FUNCTION(IfNull) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/isNull.cpp b/src/Functions/isNull.cpp index 1e1d4edd6ed..cdce037088d 100644 --- a/src/Functions/isNull.cpp +++ b/src/Functions/isNull.cpp @@ -74,7 +74,7 @@ public: REGISTER_FUNCTION(IsNull) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/least.cpp b/src/Functions/least.cpp index 53676f0c00d..f5680d4d468 100644 --- a/src/Functions/least.cpp +++ b/src/Functions/least.cpp @@ -65,7 +65,7 @@ using FunctionLeast = FunctionBinaryArithmetic; REGISTER_FUNCTION(Least) { - factory.registerFunction>(FunctionFactory::CaseInsensitive); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/left.cpp b/src/Functions/left.cpp index 93983d698ce..006706c8f21 100644 --- a/src/Functions/left.cpp +++ b/src/Functions/left.cpp @@ -6,8 +6,8 @@ namespace DB REGISTER_FUNCTION(Left) { - 
factory.registerFunction>(FunctionFactory::CaseInsensitive); - factory.registerFunction>(FunctionFactory::CaseSensitive); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); + factory.registerFunction>({}, FunctionFactory::CaseSensitive); } } diff --git a/src/Functions/lemmatize.cpp b/src/Functions/lemmatize.cpp index 72d4fe98a86..873a12baf40 100644 --- a/src/Functions/lemmatize.cpp +++ b/src/Functions/lemmatize.cpp @@ -122,7 +122,7 @@ public: REGISTER_FUNCTION(Lemmatize) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction(); } } diff --git a/src/Functions/lengthUTF8.cpp b/src/Functions/lengthUTF8.cpp index b139f87bc64..9e5b5d04dd2 100644 --- a/src/Functions/lengthUTF8.cpp +++ b/src/Functions/lengthUTF8.cpp @@ -73,8 +73,8 @@ REGISTER_FUNCTION(LengthUTF8) factory.registerFunction(); /// Compatibility aliases. - factory.registerFunction("CHAR_LENGTH", FunctionFactory::CaseInsensitive); - factory.registerFunction("CHARACTER_LENGTH", FunctionFactory::CaseInsensitive); + factory.registerAlias("CHAR_LENGTH", "lengthUTF8", FunctionFactory::CaseInsensitive); + factory.registerAlias("CHARACTER_LENGTH", "lengthUTF8", FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/log.cpp b/src/Functions/log.cpp index cacb6dec1d2..9096b8c6f22 100644 --- a/src/Functions/log.cpp +++ b/src/Functions/log.cpp @@ -34,7 +34,7 @@ using FunctionLog = FunctionMathUnary>; REGISTER_FUNCTION(Log) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); factory.registerAlias("ln", "log", FunctionFactory::CaseInsensitive); } diff --git a/src/Functions/log10.cpp b/src/Functions/log10.cpp index 87b1e84f0fd..5dfe4ac9357 100644 --- a/src/Functions/log10.cpp +++ b/src/Functions/log10.cpp @@ -13,7 +13,7 @@ using FunctionLog10 = FunctionMathUnary(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/log2.cpp b/src/Functions/log2.cpp index 487a9850958..9457ac64bc6 100644 --- a/src/Functions/log2.cpp +++ b/src/Functions/log2.cpp @@ -13,7 +13,7 @@ using FunctionLog2 = FunctionMathUnary>; REGISTER_FUNCTION(Log2) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/lower.cpp b/src/Functions/lower.cpp index a1b777db112..38ae5a8a7f0 100644 --- a/src/Functions/lower.cpp +++ b/src/Functions/lower.cpp @@ -19,7 +19,7 @@ using FunctionLower = FunctionStringToString, NameLower REGISTER_FUNCTION(Lower) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); factory.registerAlias("lcase", NameLower::name, FunctionFactory::CaseInsensitive); } diff --git a/src/Functions/mathConstants.cpp b/src/Functions/mathConstants.cpp index c65b55cf7cf..c7eb37289ac 100644 --- a/src/Functions/mathConstants.cpp +++ b/src/Functions/mathConstants.cpp @@ -41,7 +41,7 @@ REGISTER_FUNCTION(E) REGISTER_FUNCTION(Pi) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/max2.cpp b/src/Functions/max2.cpp index 3a693f1f5bb..928e6f22918 100644 --- a/src/Functions/max2.cpp +++ b/src/Functions/max2.cpp @@ -21,6 +21,6 @@ namespace REGISTER_FUNCTION(Max2) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); 
} } diff --git a/src/Functions/min2.cpp b/src/Functions/min2.cpp index 10233ab4011..f031530edf5 100644 --- a/src/Functions/min2.cpp +++ b/src/Functions/min2.cpp @@ -22,6 +22,6 @@ namespace REGISTER_FUNCTION(Min2) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/monthName.cpp b/src/Functions/monthName.cpp index e841f68b326..f782ac647cc 100644 --- a/src/Functions/monthName.cpp +++ b/src/Functions/monthName.cpp @@ -74,7 +74,7 @@ private: REGISTER_FUNCTION(MonthName) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/now.cpp b/src/Functions/now.cpp index 9ecaca55e52..d257bf4314e 100644 --- a/src/Functions/now.cpp +++ b/src/Functions/now.cpp @@ -128,7 +128,7 @@ public: REGISTER_FUNCTION(Now) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/now64.cpp b/src/Functions/now64.cpp index 0308fa95b39..c5225d3317f 100644 --- a/src/Functions/now64.cpp +++ b/src/Functions/now64.cpp @@ -160,7 +160,7 @@ public: REGISTER_FUNCTION(Now64) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/nullIf.cpp b/src/Functions/nullIf.cpp index e85747834b1..392cc20cfcf 100644 --- a/src/Functions/nullIf.cpp +++ b/src/Functions/nullIf.cpp @@ -69,8 +69,7 @@ public: REGISTER_FUNCTION(NullIf) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } - diff --git a/src/Functions/position.cpp b/src/Functions/position.cpp index c25beec5ed9..409a593b44c 100644 --- a/src/Functions/position.cpp +++ b/src/Functions/position.cpp @@ -19,7 +19,7 @@ using FunctionPosition = FunctionsStringSearch(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); factory.registerAlias("locate", NamePosition::name, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/pow.cpp b/src/Functions/pow.cpp index afbf9d10f16..9b383da97e7 100644 --- a/src/Functions/pow.cpp +++ b/src/Functions/pow.cpp @@ -13,7 +13,7 @@ using FunctionPow = FunctionMathBinaryFloat64(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); factory.registerAlias("power", "pow", FunctionFactory::CaseInsensitive); } diff --git a/src/Functions/radians.cpp b/src/Functions/radians.cpp index 5e46ccca5bd..2c2c2743532 100644 --- a/src/Functions/radians.cpp +++ b/src/Functions/radians.cpp @@ -23,7 +23,7 @@ namespace REGISTER_FUNCTION(Radians) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/rand.cpp b/src/Functions/rand.cpp index ba511382651..ea30922d731 100644 --- a/src/Functions/rand.cpp +++ b/src/Functions/rand.cpp @@ -13,9 +13,8 @@ using FunctionRand = FunctionRandom; REGISTER_FUNCTION(Rand) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); factory.registerAlias("rand32", NameRand::name); } } - diff --git a/src/Functions/reinterpretAs.cpp b/src/Functions/reinterpretAs.cpp index 8e656863cdb..76afedb4f06 100644 --- a/src/Functions/reinterpretAs.cpp +++ b/src/Functions/reinterpretAs.cpp @@ 
-301,7 +301,7 @@ private: ColumnFixedString::Chars & data_to = dst.getChars(); data_to.resize(n * rows); - memcpy(data_to.data(), src.getRawData().data, data_to.size()); + memcpy(data_to.data(), src.getRawData().data(), data_to.size()); } static void NO_INLINE executeToString(const IColumn & src, ColumnString & dst) diff --git a/src/Functions/repeat.cpp b/src/Functions/repeat.cpp index 40f33f2b9c4..748615f9ce5 100644 --- a/src/Functions/repeat.cpp +++ b/src/Functions/repeat.cpp @@ -94,14 +94,14 @@ struct RepeatImpl template static void constStrVectorRepeat( - const StringRef & copy_str, + std::string_view copy_str, ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets, const PaddedPODArray & col_num) { UInt64 data_size = 0; res_offsets.resize(col_num.size()); - UInt64 str_size = copy_str.size; + UInt64 str_size = copy_str.size(); UInt64 col_size = col_num.size(); for (UInt64 i = 0; i < col_size; ++i) { @@ -116,7 +116,7 @@ struct RepeatImpl T repeat_time = col_num[i]; checkRepeatTime(repeat_time); process( - reinterpret_cast(const_cast(copy_str.data)), + reinterpret_cast(const_cast(copy_str.data())), res_data.data() + res_offsets[i - 1], str_size + 1, repeat_time); @@ -227,7 +227,7 @@ public: { /// Note that const-const case is handled by useDefaultImplementationForConstants. - StringRef copy_str = col_const->getDataColumn().getDataAt(0); + std::string_view copy_str = col_const->getDataColumn().getDataAt(0).toView(); if (castType(arguments[1].type.get(), [&](const auto & type) { @@ -254,7 +254,7 @@ public: REGISTER_FUNCTION(Repeat) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/reverse.cpp b/src/Functions/reverse.cpp index 56397958b10..08234afaff0 100644 --- a/src/Functions/reverse.cpp +++ b/src/Functions/reverse.cpp @@ -150,7 +150,7 @@ private: REGISTER_FUNCTION(Reverse) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/right.cpp b/src/Functions/right.cpp index 4c26630c9ff..a8ab4bf9685 100644 --- a/src/Functions/right.cpp +++ b/src/Functions/right.cpp @@ -6,8 +6,8 @@ namespace DB REGISTER_FUNCTION(Right) { - factory.registerFunction>(FunctionFactory::CaseInsensitive); - factory.registerFunction>(FunctionFactory::CaseSensitive); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); + factory.registerFunction>({}, FunctionFactory::CaseSensitive); } } diff --git a/src/Functions/serverConstants.cpp b/src/Functions/serverConstants.cpp index 2190b16d966..623382e1da3 100644 --- a/src/Functions/serverConstants.cpp +++ b/src/Functions/serverConstants.cpp @@ -153,12 +153,12 @@ REGISTER_FUNCTION(Uptime) REGISTER_FUNCTION(Version) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } REGISTER_FUNCTION(Revision) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } REGISTER_FUNCTION(ZooKeeperSessionUptime) @@ -174,4 +174,3 @@ REGISTER_FUNCTION(GetOSKernelVersion) } - diff --git a/src/Functions/sign.cpp b/src/Functions/sign.cpp index ae87ff8e8b6..60ad6ba5365 100644 --- a/src/Functions/sign.cpp +++ b/src/Functions/sign.cpp @@ -45,7 +45,7 @@ struct FunctionUnaryArithmeticMonotonicity REGISTER_FUNCTION(Sign) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, 
FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/sin.cpp b/src/Functions/sin.cpp index 536b2635b9a..dc75f4800c0 100644 --- a/src/Functions/sin.cpp +++ b/src/Functions/sin.cpp @@ -13,7 +13,7 @@ using FunctionSin = FunctionMathUnary>; REGISTER_FUNCTION(Sin) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/sqrt.cpp b/src/Functions/sqrt.cpp index 63c1098d7e7..3c50f994391 100644 --- a/src/Functions/sqrt.cpp +++ b/src/Functions/sqrt.cpp @@ -13,7 +13,7 @@ using FunctionSqrt = FunctionMathUnary>; REGISTER_FUNCTION(Sqrt) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/stem.cpp b/src/Functions/stem.cpp index 25021ed74a4..50293500b35 100644 --- a/src/Functions/stem.cpp +++ b/src/Functions/stem.cpp @@ -127,7 +127,7 @@ public: REGISTER_FUNCTION(Stem) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction(); } } diff --git a/src/Functions/substring.cpp b/src/Functions/substring.cpp index 79b801a9ef6..dc1c3324437 100644 --- a/src/Functions/substring.cpp +++ b/src/Functions/substring.cpp @@ -188,11 +188,11 @@ public: REGISTER_FUNCTION(Substring) { - factory.registerFunction>(FunctionFactory::CaseInsensitive); + factory.registerFunction>({}, FunctionFactory::CaseInsensitive); factory.registerAlias("substr", "substring", FunctionFactory::CaseInsensitive); factory.registerAlias("mid", "substring", FunctionFactory::CaseInsensitive); /// from MySQL dialect - factory.registerFunction>(FunctionFactory::CaseSensitive); + factory.registerFunction>({}, FunctionFactory::CaseSensitive); } } diff --git a/src/Functions/synonyms.cpp b/src/Functions/synonyms.cpp index d68f9c76743..69310ed9680 100644 --- a/src/Functions/synonyms.cpp +++ b/src/Functions/synonyms.cpp @@ -120,7 +120,7 @@ public: REGISTER_FUNCTION(Synonyms) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/tan.cpp b/src/Functions/tan.cpp index 7d84055d0c3..e39f8598419 100644 --- a/src/Functions/tan.cpp +++ b/src/Functions/tan.cpp @@ -13,7 +13,7 @@ using FunctionTan = FunctionMathUnary>; REGISTER_FUNCTION(Tan) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/tanh.cpp b/src/Functions/tanh.cpp index 9461c2a5811..bdefa5263d7 100644 --- a/src/Functions/tanh.cpp +++ b/src/Functions/tanh.cpp @@ -39,9 +39,7 @@ using FunctionTanh = FunctionMathUnary>; REGISTER_FUNCTION(Tanh) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/toDayOfMonth.cpp b/src/Functions/toDayOfMonth.cpp index c5ed4629258..d7689ef00f2 100644 --- a/src/Functions/toDayOfMonth.cpp +++ b/src/Functions/toDayOfMonth.cpp @@ -14,10 +14,8 @@ REGISTER_FUNCTION(ToDayOfMonth) factory.registerFunction(); /// MysQL compatibility alias. 
- factory.registerFunction("DAY", FunctionFactory::CaseInsensitive); - factory.registerFunction("DAYOFMONTH", FunctionFactory::CaseInsensitive); + factory.registerAlias("DAY", "toDayOfMonth", FunctionFactory::CaseInsensitive); + factory.registerAlias("DAYOFMONTH", "toDayOfMonth", FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/toDayOfWeek.cpp b/src/Functions/toDayOfWeek.cpp index 2c04e30a607..354d4dea894 100644 --- a/src/Functions/toDayOfWeek.cpp +++ b/src/Functions/toDayOfWeek.cpp @@ -14,9 +14,7 @@ REGISTER_FUNCTION(ToDayOfWeek) factory.registerFunction(); /// MysQL compatibility alias. - factory.registerFunction("DAYOFWEEK", FunctionFactory::CaseInsensitive); + factory.registerAlias("DAYOFWEEK", "toDayOfWeek", FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/toDayOfYear.cpp b/src/Functions/toDayOfYear.cpp index ac289e3a757..8b03f1a4211 100644 --- a/src/Functions/toDayOfYear.cpp +++ b/src/Functions/toDayOfYear.cpp @@ -14,9 +14,7 @@ REGISTER_FUNCTION(ToDayOfYear) factory.registerFunction(); /// MysQL compatibility alias. - factory.registerFunction("DAYOFYEAR", FunctionFactory::CaseInsensitive); + factory.registerAlias("DAYOFYEAR", "toDayOfYear", FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/toFixedString.h b/src/Functions/toFixedString.h index cbd29784271..200d25283d5 100644 --- a/src/Functions/toFixedString.h +++ b/src/Functions/toFixedString.h @@ -43,7 +43,7 @@ public: size_t getNumberOfArguments() const override { return 2; } bool isInjective(const ColumnsWithTypeAndName &) const override { return true; } - bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { diff --git a/src/Functions/toHour.cpp b/src/Functions/toHour.cpp index 172515aad58..a6a57946e33 100644 --- a/src/Functions/toHour.cpp +++ b/src/Functions/toHour.cpp @@ -14,9 +14,7 @@ REGISTER_FUNCTION(ToHour) factory.registerFunction(); /// MysQL compatibility alias. - factory.registerFunction("HOUR", FunctionFactory::CaseInsensitive); + factory.registerAlias("HOUR", "toHour", FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/toLastDayOfMonth.cpp b/src/Functions/toLastDayOfMonth.cpp index 38d42521f00..a7faab15f9f 100644 --- a/src/Functions/toLastDayOfMonth.cpp +++ b/src/Functions/toLastDayOfMonth.cpp @@ -13,9 +13,7 @@ REGISTER_FUNCTION(ToLastDayOfMonth) factory.registerFunction(); /// MySQL compatibility alias. - factory.registerFunction("LAST_DAY", FunctionFactory::CaseInsensitive); + factory.registerAlias("LAST_DAY", "toLastDayOfMonth", FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/toMinute.cpp b/src/Functions/toMinute.cpp index c84b0876a24..25939870554 100644 --- a/src/Functions/toMinute.cpp +++ b/src/Functions/toMinute.cpp @@ -12,10 +12,9 @@ using FunctionToMinute = FunctionDateOrDateTimeToSomething(); + /// MysQL compatibility alias. 
- factory.registerFunction("MINUTE", FunctionFactory::CaseInsensitive); + factory.registerAlias("MINUTE", "toMinute", FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/toMonth.cpp b/src/Functions/toMonth.cpp index 1364ad5a997..783a1341e23 100644 --- a/src/Functions/toMonth.cpp +++ b/src/Functions/toMonth.cpp @@ -13,9 +13,7 @@ REGISTER_FUNCTION(ToMonth) { factory.registerFunction(); /// MysQL compatibility alias. - factory.registerFunction("MONTH", FunctionFactory::CaseInsensitive); + factory.registerAlias("MONTH", "toMonth", FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/toQuarter.cpp b/src/Functions/toQuarter.cpp index e9c1795121f..2268b6402c6 100644 --- a/src/Functions/toQuarter.cpp +++ b/src/Functions/toQuarter.cpp @@ -13,9 +13,7 @@ REGISTER_FUNCTION(ToQuarter) { factory.registerFunction(); /// MysQL compatibility alias. - factory.registerFunction("QUARTER", FunctionFactory::CaseInsensitive); + factory.registerAlias("QUARTER", "toQuarter", FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/toSecond.cpp b/src/Functions/toSecond.cpp index 8ab329689f7..2fd64912c0f 100644 --- a/src/Functions/toSecond.cpp +++ b/src/Functions/toSecond.cpp @@ -12,10 +12,9 @@ using FunctionToSecond = FunctionDateOrDateTimeToSomething(); + /// MysQL compatibility alias. - factory.registerFunction("SECOND", FunctionFactory::CaseInsensitive); + factory.registerAlias("SECOND", "toSecond", FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/toYear.cpp b/src/Functions/toYear.cpp index 6658bf0e927..9cf2a260921 100644 --- a/src/Functions/toYear.cpp +++ b/src/Functions/toYear.cpp @@ -12,10 +12,9 @@ using FunctionToYear = FunctionDateOrDateTimeToSomething(); + /// MysQL compatibility alias. - factory.registerFunction("YEAR", FunctionFactory::CaseInsensitive); + factory.registerAlias("YEAR", "toYear", FunctionFactory::CaseInsensitive); } } - - diff --git a/src/Functions/upper.cpp b/src/Functions/upper.cpp index 05a125379d9..3e1c7b1d800 100644 --- a/src/Functions/upper.cpp +++ b/src/Functions/upper.cpp @@ -18,7 +18,7 @@ using FunctionUpper = FunctionStringToString, NameUpper REGISTER_FUNCTION(Upper) { - factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction({}, FunctionFactory::CaseInsensitive); factory.registerAlias("ucase", FunctionUpper::name, FunctionFactory::CaseInsensitive); } diff --git a/src/Functions/visitParamExtractBool.cpp b/src/Functions/visitParamExtractBool.cpp index e5a2277b443..31763fe54ce 100644 --- a/src/Functions/visitParamExtractBool.cpp +++ b/src/Functions/visitParamExtractBool.cpp @@ -16,16 +16,13 @@ struct ExtractBool } }; -struct NameVisitParamExtractBool { static constexpr auto name = "visitParamExtractBool"; }; -using FunctionVisitParamExtractBool = FunctionsStringSearch>; - -struct NameSimpleJSONExtractBool { static constexpr auto name = "simpleJSONExtractBool"; }; +struct NameSimpleJSONExtractBool { static constexpr auto name = "simpleJSONExtractBool"; }; using FunctionSimpleJSONExtractBool = FunctionsStringSearch>; REGISTER_FUNCTION(VisitParamExtractBool) { - factory.registerFunction(); factory.registerFunction(); + factory.registerAlias("visitParamExtractBool", "simpleJSONExtractBool"); } } diff --git a/src/Functions/visitParamExtractFloat.cpp b/src/Functions/visitParamExtractFloat.cpp index ee00f960f8f..6f6d5274050 100644 --- a/src/Functions/visitParamExtractFloat.cpp +++ b/src/Functions/visitParamExtractFloat.cpp @@ -6,16 +6,13 @@ namespace DB { -struct NameVisitParamExtractFloat 
{ static constexpr auto name = "visitParamExtractFloat"; }; -using FunctionVisitParamExtractFloat = FunctionsStringSearch>>; - -struct NameSimpleJSONExtractFloat { static constexpr auto name = "simpleJSONExtractFloat"; }; +struct NameSimpleJSONExtractFloat { static constexpr auto name = "simpleJSONExtractFloat"; }; using FunctionSimpleJSONExtractFloat = FunctionsStringSearch>>; REGISTER_FUNCTION(VisitParamExtractFloat) { - factory.registerFunction(); factory.registerFunction(); + factory.registerAlias("visitParamExtractFloat", "simpleJSONExtractFloat"); } } diff --git a/src/Functions/visitParamExtractInt.cpp b/src/Functions/visitParamExtractInt.cpp index 30b373182ea..e020c43e8b4 100644 --- a/src/Functions/visitParamExtractInt.cpp +++ b/src/Functions/visitParamExtractInt.cpp @@ -6,16 +6,13 @@ namespace DB { -struct NameVisitParamExtractInt { static constexpr auto name = "visitParamExtractInt"; }; -using FunctionVisitParamExtractInt = FunctionsStringSearch>>; - -struct NameSimpleJSONExtractInt { static constexpr auto name = "simpleJSONExtractInt"; }; +struct NameSimpleJSONExtractInt { static constexpr auto name = "simpleJSONExtractInt"; }; using FunctionSimpleJSONExtractInt = FunctionsStringSearch>>; REGISTER_FUNCTION(VisitParamExtractInt) { - factory.registerFunction(); factory.registerFunction(); + factory.registerAlias("visitParamExtractInt", "simpleJSONExtractInt"); } } diff --git a/src/Functions/visitParamExtractRaw.cpp b/src/Functions/visitParamExtractRaw.cpp index ab21fdf6e98..74a83170545 100644 --- a/src/Functions/visitParamExtractRaw.cpp +++ b/src/Functions/visitParamExtractRaw.cpp @@ -56,16 +56,13 @@ struct ExtractRaw } }; -struct NameVisitParamExtractRaw { static constexpr auto name = "visitParamExtractRaw"; }; -using FunctionVisitParamExtractRaw = FunctionsStringSearchToString, NameVisitParamExtractRaw>; - struct NameSimpleJSONExtractRaw { static constexpr auto name = "simpleJSONExtractRaw"; }; using FunctionSimpleJSONExtractRaw = FunctionsStringSearchToString, NameSimpleJSONExtractRaw>; REGISTER_FUNCTION(VisitParamExtractRaw) { - factory.registerFunction(); factory.registerFunction(); + factory.registerAlias("visitParamExtractRaw", "simpleJSONExtractRaw"); } } diff --git a/src/Functions/visitParamExtractString.cpp b/src/Functions/visitParamExtractString.cpp index df640cef371..50d5f345189 100644 --- a/src/Functions/visitParamExtractString.cpp +++ b/src/Functions/visitParamExtractString.cpp @@ -17,16 +17,13 @@ struct ExtractString } }; -struct NameVisitParamExtractString { static constexpr auto name = "visitParamExtractString"; }; -using FunctionVisitParamExtractString = FunctionsStringSearchToString, NameVisitParamExtractString>; - struct NameSimpleJSONExtractString { static constexpr auto name = "simpleJSONExtractString"; }; using FunctionSimpleJSONExtractString = FunctionsStringSearchToString, NameSimpleJSONExtractString>; REGISTER_FUNCTION(VisitParamExtractString) { - factory.registerFunction(); factory.registerFunction(); + factory.registerAlias("visitParamExtractString", "simpleJSONExtractString"); } } diff --git a/src/Functions/visitParamExtractUInt.cpp b/src/Functions/visitParamExtractUInt.cpp index 1612c91984d..fb58e417f34 100644 --- a/src/Functions/visitParamExtractUInt.cpp +++ b/src/Functions/visitParamExtractUInt.cpp @@ -6,17 +6,14 @@ namespace DB { -struct NameVisitParamExtractUInt { static constexpr auto name = "visitParamExtractUInt"; }; -using FunctionVisitParamExtractUInt = FunctionsStringSearch>>; - -struct NameSimpleJSONExtractUInt { static constexpr auto name = 
"simpleJSONExtractUInt"; }; +struct NameSimpleJSONExtractUInt { static constexpr auto name = "simpleJSONExtractUInt"; }; using FunctionSimpleJSONExtractUInt = FunctionsStringSearch>>; REGISTER_FUNCTION(VisitParamExtractUInt) { - factory.registerFunction(); factory.registerFunction(); + factory.registerAlias("visitParamExtractUInt", "simpleJSONExtractUInt"); } } diff --git a/src/Functions/visitParamHas.cpp b/src/Functions/visitParamHas.cpp index 9e481fb44cc..1ed1f1d16e7 100644 --- a/src/Functions/visitParamHas.cpp +++ b/src/Functions/visitParamHas.cpp @@ -16,16 +16,13 @@ struct HasParam } }; -struct NameVisitParamHas { static constexpr auto name = "visitParamHas"; }; -using FunctionVisitParamHas = FunctionsStringSearch>; - -struct NameSimpleJSONHas { static constexpr auto name = "simpleJSONHas"; }; +struct NameSimpleJSONHas { static constexpr auto name = "simpleJSONHas"; }; using FunctionSimpleJSONHas = FunctionsStringSearch>; REGISTER_FUNCTION(VisitParamHas) { - factory.registerFunction(); factory.registerFunction(); + factory.registerAlias("visitParamHas", "simpleJSONHas"); } } diff --git a/src/IO/Archives/ZipArchiveReader.cpp b/src/IO/Archives/ZipArchiveReader.cpp index 68726248dc4..3127f299f5c 100644 --- a/src/IO/Archives/ZipArchiveReader.cpp +++ b/src/IO/Archives/ZipArchiveReader.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include @@ -553,11 +554,11 @@ void ZipArchiveReader::checkResult(int code) const if (code >= UNZ_OK) return; - String message = "Code= "; + String message = "Code = "; switch (code) { case UNZ_OK: return; - case UNZ_ERRNO: message += "ERRNO, errno= " + String{strerror(errno)}; break; + case UNZ_ERRNO: message += "ERRNO, errno = " + errnoToString(); break; case UNZ_PARAMERROR: message += "PARAMERROR"; break; case UNZ_BADZIPFILE: message += "BADZIPFILE"; break; case UNZ_INTERNALERROR: message += "INTERNALERROR"; break; diff --git a/src/IO/Archives/ZipArchiveWriter.cpp b/src/IO/Archives/ZipArchiveWriter.cpp index 28a7bacf8d0..817e8132b64 100644 --- a/src/IO/Archives/ZipArchiveWriter.cpp +++ b/src/IO/Archives/ZipArchiveWriter.cpp @@ -3,6 +3,7 @@ #if USE_MINIZIP #include #include +#include #include #include @@ -380,10 +381,10 @@ void ZipArchiveWriter::checkResult(int code) const if (code >= ZIP_OK) return; - String message = "Code= "; + String message = "Code = "; switch (code) { - case ZIP_ERRNO: message += "ERRNO, errno= " + String{strerror(errno)}; break; + case ZIP_ERRNO: message += "ERRNO, errno = " + errnoToString(); break; case ZIP_PARAMERROR: message += "PARAMERROR"; break; case ZIP_BADZIPFILE: message += "BADZIPFILE"; break; case ZIP_INTERNALERROR: message += "INTERNALERROR"; break; diff --git a/src/IO/BufferWithOwnMemory.h b/src/IO/BufferWithOwnMemory.h index 479e0e88fcc..2121747500b 100644 --- a/src/IO/BufferWithOwnMemory.h +++ b/src/IO/BufferWithOwnMemory.h @@ -8,6 +8,8 @@ #include #include +#include + namespace ProfileEvents { @@ -19,6 +21,11 @@ namespace ProfileEvents namespace DB { +namespace ErrorCodes +{ + extern const int ARGUMENT_OUT_OF_BOUND; +} + /** Replacement for std::vector to use in buffers. * Differs in that is doesn't do unneeded memset. (And also tries to do as little as possible.) @@ -38,9 +45,9 @@ struct Memory : boost::noncopyable, Allocator Memory() = default; /// If alignment != 0, then allocate memory aligned to specified value. 
- explicit Memory(size_t size_, size_t alignment_ = 0) : m_capacity(size_), m_size(m_capacity), alignment(alignment_) + explicit Memory(size_t size_, size_t alignment_ = 0) : alignment(alignment_) { - alloc(); + alloc(size_); } ~Memory() @@ -75,57 +82,55 @@ struct Memory : boost::noncopyable, Allocator void resize(size_t new_size) { - if (0 == m_capacity) + if (!m_data) { - m_size = new_size; - m_capacity = new_size; - alloc(); + alloc(new_size); + return; } - else if (new_size <= m_capacity - pad_right) + + if (new_size <= m_capacity - pad_right) { m_size = new_size; return; } - else - { - size_t new_capacity = align(new_size, alignment) + pad_right; - size_t diff = new_capacity - m_capacity; - ProfileEvents::increment(ProfileEvents::IOBufferAllocBytes, diff); + size_t new_capacity = withPadding(new_size); - m_data = static_cast(Allocator::realloc(m_data, m_capacity, new_capacity, alignment)); - m_capacity = new_capacity; - m_size = m_capacity - pad_right; - } + size_t diff = new_capacity - m_capacity; + ProfileEvents::increment(ProfileEvents::IOBufferAllocBytes, diff); + + m_data = static_cast(Allocator::realloc(m_data, m_capacity, new_capacity, alignment)); + m_capacity = new_capacity; + m_size = new_size; } private: - static size_t align(const size_t value, const size_t alignment) + static size_t withPadding(size_t value) { - if (!alignment) - return value; + size_t res = 0; - if (!(value % alignment)) - return value; + if (common::addOverflow(value, pad_right, res)) + throw Exception("value is too big to apply padding", ErrorCodes::ARGUMENT_OUT_OF_BOUND); - return (value + alignment - 1) / alignment * alignment; + return res; } - void alloc() + void alloc(size_t new_size) { - if (!m_capacity) + if (!new_size) { m_data = nullptr; return; } - ProfileEvents::increment(ProfileEvents::IOBufferAllocs); - ProfileEvents::increment(ProfileEvents::IOBufferAllocBytes, m_capacity); + size_t new_capacity = withPadding(new_size); + + ProfileEvents::increment(ProfileEvents::IOBufferAllocs); + ProfileEvents::increment(ProfileEvents::IOBufferAllocBytes, new_capacity); - size_t new_capacity = align(m_capacity, alignment) + pad_right; m_data = static_cast(Allocator::alloc(new_capacity, alignment)); m_capacity = new_capacity; - m_size = m_capacity - pad_right; + m_size = new_size; } void dealloc() diff --git a/src/IO/CompressionMethod.cpp b/src/IO/CompressionMethod.cpp index 449335407aa..83db5eeaeed 100644 --- a/src/IO/CompressionMethod.cpp +++ b/src/IO/CompressionMethod.cpp @@ -94,7 +94,7 @@ CompressionMethod chooseCompressionMethod(const std::string & path, const std::s return CompressionMethod::None; throw Exception( - "Unknown compression method " + hint + ". Only 'auto', 'none', 'gzip', 'deflate', 'br', 'xz', 'zstd', 'lz4', 'bz2', 'snappy' are supported as compression methods", + "Unknown compression method '" + hint + "'. Only 'auto', 'none', 'gzip', 'deflate', 'br', 'xz', 'zstd', 'lz4', 'bz2', 'snappy' are supported as compression methods", ErrorCodes::NOT_IMPLEMENTED); } diff --git a/src/IO/HTTPChunkedReadBuffer.cpp b/src/IO/HTTPChunkedReadBuffer.cpp index 75827679d0c..a7841b1180f 100644 --- a/src/IO/HTTPChunkedReadBuffer.cpp +++ b/src/IO/HTTPChunkedReadBuffer.cpp @@ -32,6 +32,9 @@ size_t HTTPChunkedReadBuffer::readChunkHeader() ++in->position(); } while (!in->eof() && isHexDigit(*in->position())); + if (res > max_chunk_size) + throw Exception("Chunk size exceeded the limit", ErrorCodes::ARGUMENT_OUT_OF_BOUND); + /// NOTE: If we want to read any chunk extensions, it should be done here. 
skipToCarriageReturnOrEOF(*in); diff --git a/src/IO/HTTPChunkedReadBuffer.h b/src/IO/HTTPChunkedReadBuffer.h index 378835cafc0..68d90e470fa 100644 --- a/src/IO/HTTPChunkedReadBuffer.h +++ b/src/IO/HTTPChunkedReadBuffer.h @@ -10,9 +10,12 @@ namespace DB class HTTPChunkedReadBuffer : public BufferWithOwnMemory { public: - explicit HTTPChunkedReadBuffer(std::unique_ptr in_) : in(std::move(in_)) {} + explicit HTTPChunkedReadBuffer(std::unique_ptr in_, size_t max_chunk_size_) + : max_chunk_size(max_chunk_size_), in(std::move(in_)) + {} private: + const size_t max_chunk_size; std::unique_ptr in; size_t readChunkHeader(); diff --git a/src/IO/HadoopSnappyReadBuffer.cpp b/src/IO/HadoopSnappyReadBuffer.cpp index 2a65ca9826b..408e76e19be 100644 --- a/src/IO/HadoopSnappyReadBuffer.cpp +++ b/src/IO/HadoopSnappyReadBuffer.cpp @@ -183,23 +183,28 @@ bool HadoopSnappyReadBuffer::nextImpl() if (eof) return false; - if (!in_available) + do { - in->nextIfAtEnd(); - in_available = in->buffer().end() - in->position(); - in_data = in->position(); + if (!in_available) + { + in->nextIfAtEnd(); + in_available = in->buffer().end() - in->position(); + in_data = in->position(); + } + + if (decoder->result == Status::NEEDS_MORE_INPUT && (!in_available || in->eof())) + { + throw Exception(String("hadoop snappy decode error:") + statusToString(decoder->result), ErrorCodes::SNAPPY_UNCOMPRESS_FAILED); + } + + out_capacity = internal_buffer.size(); + out_data = internal_buffer.begin(); + decoder->result = decoder->readBlock(&in_available, &in_data, &out_capacity, &out_data); + + in->position() = in->buffer().end() - in_available; } + while (decoder->result == Status::NEEDS_MORE_INPUT); - if (decoder->result == Status::NEEDS_MORE_INPUT && (!in_available || in->eof())) - { - throw Exception(String("hadoop snappy decode error:") + statusToString(decoder->result), ErrorCodes::SNAPPY_UNCOMPRESS_FAILED); - } - - out_capacity = internal_buffer.size(); - out_data = internal_buffer.begin(); - decoder->result = decoder->readBlock(&in_available, &in_data, &out_capacity, &out_data); - - in->position() = in->buffer().end() - in_available; working_buffer.resize(internal_buffer.size() - out_capacity); if (decoder->result == Status::OK) diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index fb5d0b9aea4..ac899b5a61f 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -969,10 +969,12 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D { static constexpr bool throw_exception = std::is_same_v; - /// YYYY-MM-DD hh:mm:ss - static constexpr auto date_time_broken_down_length = 19; /// YYYY-MM-DD static constexpr auto date_broken_down_length = 10; + /// hh:mm:ss + static constexpr auto time_broken_down_length = 8; + /// YYYY-MM-DD hh:mm:ss + static constexpr auto date_time_broken_down_length = date_broken_down_length + 1 + time_broken_down_length; char s[date_time_broken_down_length]; char * s_pos = s; @@ -995,16 +997,15 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D if (s_pos == s + 4 && !buf.eof() && !isNumericASCII(*buf.position())) { const auto already_read_length = s_pos - s; - const size_t remaining_date_time_size = date_time_broken_down_length - already_read_length; const size_t remaining_date_size = date_broken_down_length - already_read_length; - size_t size = buf.read(s_pos, remaining_date_time_size); - if (size != remaining_date_time_size && size != remaining_date_size) + size_t size = buf.read(s_pos, remaining_date_size); + if (size != 
remaining_date_size) { s_pos[size] = 0; if constexpr (throw_exception) - throw ParsingException(std::string("Cannot parse datetime ") + s, ErrorCodes::CANNOT_PARSE_DATETIME); + throw ParsingException(std::string("Cannot parse DateTime ") + s, ErrorCodes::CANNOT_PARSE_DATETIME); else return false; } @@ -1017,11 +1018,24 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D UInt8 minute = 0; UInt8 second = 0; - if (size == remaining_date_time_size) + if (!buf.eof() && (*buf.position() == ' ' || *buf.position() == 'T')) { - hour = (s[11] - '0') * 10 + (s[12] - '0'); - minute = (s[14] - '0') * 10 + (s[15] - '0'); - second = (s[17] - '0') * 10 + (s[18] - '0'); + ++buf.position(); + size = buf.read(s, time_broken_down_length); + + if (size != time_broken_down_length) + { + s_pos[size] = 0; + + if constexpr (throw_exception) + throw ParsingException(std::string("Cannot parse time component of DateTime ") + s, ErrorCodes::CANNOT_PARSE_DATETIME); + else + return false; + } + + hour = (s[0] - '0') * 10 + (s[1] - '0'); + minute = (s[3] - '0') * 10 + (s[4] - '0'); + second = (s[6] - '0') * 10 + (s[7] - '0'); } if (unlikely(year == 0)) diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 502e76a6c5e..d5b0ce4bebe 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -736,6 +736,7 @@ inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf) readDateTextImpl(local_date, buf); else if (!readDateTextImpl(local_date, buf)) return false; + /// When the parameter is out of rule or out of range, Date32 uses 1925-01-01 as the default value (-DateLUT::instance().getDayNumOffsetEpoch(), -16436) and Date uses 1970-01-01. date = DateLUT::instance().makeDayNum(local_date.year(), local_date.month(), local_date.day(), -static_cast(DateLUT::instance().getDayNumOffsetEpoch())); return ReturnType(true); @@ -856,10 +857,10 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons const char * s = buf.position(); /// YYYY-MM-DD hh:mm:ss - static constexpr auto DateTimeStringInputSize = 19; - ///YYYY-MM-DD - static constexpr auto DateStringInputSize = 10; - bool optimistic_path_for_date_time_input = s + DateTimeStringInputSize <= buf.buffer().end(); + static constexpr auto date_time_broken_down_length = 19; + /// YYYY-MM-DD + static constexpr auto date_broken_down_length = 10; + bool optimistic_path_for_date_time_input = s + date_time_broken_down_length <= buf.buffer().end(); if (optimistic_path_for_date_time_input) { @@ -872,7 +873,8 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons UInt8 hour = 0; UInt8 minute = 0; UInt8 second = 0; - ///simply determine whether it is YYYY-MM-DD hh:mm:ss or YYYY-MM-DD by the content of the tenth character in an optimistic scenario + + /// Simply determine whether it is YYYY-MM-DD hh:mm:ss or YYYY-MM-DD by the content of the tenth character in an optimistic scenario bool dt_long = (s[10] == ' ' || s[10] == 'T'); if (dt_long) { @@ -887,9 +889,10 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons datetime = date_lut.makeDateTime(year, month, day, hour, minute, second); if (dt_long) - buf.position() += DateTimeStringInputSize; + buf.position() += date_time_broken_down_length; else - buf.position() += DateStringInputSize; + buf.position() += date_broken_down_length; + return ReturnType(true); } else @@ -961,7 +964,13 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re components.whole = components.whole / 
common::exp10_i32(scale); } - datetime64 = negative_multiplier * DecimalUtils::decimalFromComponents(components, scale); + if constexpr (std::is_same_v) + datetime64 = DecimalUtils::decimalFromComponents(components, scale); + else + DecimalUtils::tryGetDecimalFromComponents(components, scale, datetime64); + + datetime64 *= negative_multiplier; + return ReturnType(true); } @@ -988,21 +997,33 @@ inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuf inline void readDateTimeText(LocalDateTime & datetime, ReadBuffer & buf) { - char s[19]; - size_t size = buf.read(s, 19); - if (19 != size) + char s[10]; + size_t size = buf.read(s, 10); + if (10 != size) { s[size] = 0; - throw ParsingException(std::string("Cannot parse datetime ") + s, ErrorCodes::CANNOT_PARSE_DATETIME); + throw ParsingException(std::string("Cannot parse DateTime ") + s, ErrorCodes::CANNOT_PARSE_DATETIME); } datetime.year((s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0')); datetime.month((s[5] - '0') * 10 + (s[6] - '0')); datetime.day((s[8] - '0') * 10 + (s[9] - '0')); - datetime.hour((s[11] - '0') * 10 + (s[12] - '0')); - datetime.minute((s[14] - '0') * 10 + (s[15] - '0')); - datetime.second((s[17] - '0') * 10 + (s[18] - '0')); + /// Allow to read Date as DateTime + if (buf.eof() || !(*buf.position() == ' ' || *buf.position() == 'T')) + return; + + ++buf.position(); + size = buf.read(s, 8); + if (8 != size) + { + s[size] = 0; + throw ParsingException(std::string("Cannot parse time component of DateTime ") + s, ErrorCodes::CANNOT_PARSE_DATETIME); + } + + datetime.hour((s[0] - '0') * 10 + (s[1] - '0')); + datetime.minute((s[3] - '0') * 10 + (s[4] - '0')); + datetime.second((s[6] - '0') * 10 + (s[7] - '0')); } diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 199ae4dcf7f..e639ecbedc2 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -80,7 +80,6 @@ struct ReadSettings size_t remote_fs_read_backoff_max_tries = 4; bool enable_filesystem_cache = true; - size_t filesystem_cache_max_wait_sec = 1; bool read_from_filesystem_cache_if_exists_otherwise_bypass_cache = false; bool enable_filesystem_cache_log = false; bool is_file_cache_persistent = false; /// Some files can be made non-evictable. 
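
Note on the ReadHelpers changes above: DateTime text parsing is now split into a fixed 10-character date part (YYYY-MM-DD) and an optional 8-character time part (hh:mm:ss) separated by ' ' or 'T', so a bare Date also reads as a DateTime with a zero time component. The following is a minimal standalone sketch of that parsing shape, assuming plain C++ strings instead of ClickHouse's ReadBuffer; all names are illustrative and digit validation is omitted.

#include <iostream>
#include <optional>
#include <string_view>

struct BrokenDateTime { int year = 0, month = 0, day = 0, hour = 0, minute = 0, second = 0; };

/// Sketch: parse "YYYY-MM-DD" followed by an optional " hh:mm:ss" or "Thh:mm:ss",
/// mirroring the split into date_broken_down_length (10) and time_broken_down_length (8).
std::optional<BrokenDateTime> parseDateTimeText(std::string_view s)
{
    static constexpr size_t date_len = 10; /// YYYY-MM-DD
    static constexpr size_t time_len = 8;  /// hh:mm:ss

    if (s.size() < date_len)
        return std::nullopt;

    auto digit = [&](size_t i) { return s[i] - '0'; };

    BrokenDateTime res;
    res.year = digit(0) * 1000 + digit(1) * 100 + digit(2) * 10 + digit(3);
    res.month = digit(5) * 10 + digit(6);
    res.day = digit(8) * 10 + digit(9);

    /// Allow to read Date as DateTime: no separator means a zero time component.
    if (s.size() == date_len || (s[date_len] != ' ' && s[date_len] != 'T'))
        return res;

    if (s.size() < date_len + 1 + time_len)
        return std::nullopt; /// cannot parse the time component

    const size_t t = date_len + 1;
    res.hour = digit(t) * 10 + digit(t + 1);
    res.minute = digit(t + 3) * 10 + digit(t + 4);
    res.second = digit(t + 6) * 10 + digit(t + 7);
    return res;
}

int main()
{
    for (std::string_view s : {"2022-08-18", "2022-08-18 12:34:56", "2022-08-18T12:34:56"})
        if (auto dt = parseDateTimeText(s))
            std::cout << dt->year << '-' << dt->month << '-' << dt->day << ' '
                      << dt->hour << ':' << dt->minute << ':' << dt->second << '\n';
}
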
diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index 007577e28a4..089d89cd8a7 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -5,6 +5,8 @@ #include "PocoHTTPClient.h" #include +#include +#include #include #include @@ -14,6 +16,7 @@ #include #include +#include #include #include #include "Poco/StreamCopier.h" @@ -23,6 +26,8 @@ #include +static const int SUCCESS_RESPONSE_MIN = 200; +static const int SUCCESS_RESPONSE_MAX = 299; namespace ProfileEvents { @@ -121,6 +126,37 @@ std::shared_ptr PocoHTTPClient::MakeRequest( return response; } +namespace +{ + /// No comments: + /// 1) https://aws.amazon.com/premiumsupport/knowledge-center/s3-resolve-200-internalerror/ + /// 2) https://github.com/aws/aws-sdk-cpp/issues/658 + bool checkRequestCanReturn2xxAndErrorInBody(Aws::Http::HttpRequest & request) + { + auto query_params = request.GetQueryStringParameters(); + if (request.HasHeader("x-amz-copy-source")) + { + /// CopyObject https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + if (query_params.empty()) + return true; + + /// UploadPartCopy https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + if (query_params.contains("partNumber") && query_params.contains("uploadId")) + return true; + + } + else + { + /// CompleteMultipartUpload https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + if (query_params.size() == 1 && query_params.contains("uploadId")) + return true; + } + + return false; + } +} + + void PocoHTTPClient::makeRequestInternal( Aws::Http::HttpRequest & request, std::shared_ptr & response, @@ -281,6 +317,7 @@ void PocoHTTPClient::makeRequestInternal( ProfileEvents::increment(select_metric(S3MetricType::Microseconds), watch.elapsedMicroseconds()); int status_code = static_cast(poco_response.getStatus()); + if (enable_s3_requests_logging) LOG_TEST(log, "Response status: {}, {}", status_code, poco_response.getReason()); @@ -316,18 +353,44 @@ void PocoHTTPClient::makeRequestInternal( response->AddHeader(header_name, header_value); } - if (status_code == 429 || status_code == 503) - { // API throttling - ProfileEvents::increment(select_metric(S3MetricType::Throttling)); - } - else if (status_code >= 300) + /// Request is successful but for some special requests we can have an actual error message in the body + if (status_code >= SUCCESS_RESPONSE_MIN && status_code <= SUCCESS_RESPONSE_MAX && checkRequestCanReturn2xxAndErrorInBody(request)) { - ProfileEvents::increment(select_metric(S3MetricType::Errors)); - if (status_code >= 500 && error_report) - error_report(request_configuration); - } + std::string response_string((std::istreambuf_iterator(response_body_stream)), + std::istreambuf_iterator()); - response->SetResponseBody(response_body_stream, session); + /// Just trim string so it will not be so long + LOG_TRACE(log, "Got dangerous response with successful code {}, checking its body: '{}'", status_code, response_string.substr(0, 300)); + const static std::string_view needle = "<Error>"; + if (auto it = std::search(response_string.begin(), response_string.end(), std::default_searcher(needle.begin(), needle.end())); it != response_string.end()) + { + LOG_WARNING(log, "Response for request contains <Error> tag in body, setting internal server error (500 code)"); + response->SetResponseCode(Aws::Http::HttpResponseCode::INTERNAL_SERVER_ERROR); + + ProfileEvents::increment(select_metric(S3MetricType::Errors)); + if (error_report) + error_report(request_configuration); + + } + + /// Set response 
from string + response->SetResponseBody(response_string); + } + else + { + + if (status_code == 429 || status_code == 503) + { // API throttling + ProfileEvents::increment(select_metric(S3MetricType::Throttling)); + } + else if (status_code >= 300) + { + ProfileEvents::increment(select_metric(S3MetricType::Errors)); + if (status_code >= 500 && error_report) + error_report(request_configuration); + } + response->SetResponseBody(response_body_stream, session); + } return; } diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index 6cedd95ec8d..5fc8c9acc17 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -80,6 +80,13 @@ public: ); } + void SetResponseBody(std::string & response_body) /// NOLINT + { + auto stream = Aws::New("http result buf", response_body); // STYLE_CHECK_ALLOW_STD_STRING_STREAM + stream->exceptions(std::ios::failbit); + body_stream = Aws::Utils::Stream::ResponseStream(std::move(stream)); + } + Aws::IOStream & GetResponseBody() const override { return body_stream.GetUnderlyingStream(); diff --git a/src/IO/WriteBufferFromHTTP.cpp b/src/IO/WriteBufferFromHTTP.cpp index 916fa40dc35..f7456ad6b6c 100644 --- a/src/IO/WriteBufferFromHTTP.cpp +++ b/src/IO/WriteBufferFromHTTP.cpp @@ -35,9 +35,8 @@ WriteBufferFromHTTP::WriteBufferFromHTTP( void WriteBufferFromHTTP::finalizeImpl() { - // for compressed body, the data is stored in buffered first - // here, make sure the content in the buffer has been flushed - this->nextImpl(); + // Make sure the content in the buffer has been flushed + this->next(); receiveResponse(*session, request, response, false); /// TODO: Response body is ignored. diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index 7da38b3df45..40e592ec197 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -164,6 +165,20 @@ void WriteBufferFromS3::finalizeImpl() if (!multipart_upload_id.empty()) completeMultipartUpload(); + + if (s3_settings.check_objects_after_upload) + { + LOG_TRACE(log, "Checking object {} exists after upload", key); + + Aws::S3::Model::HeadObjectRequest request; + request.SetBucket(bucket); + request.SetKey(key); + + auto response = client_ptr->HeadObject(request); + + if (!response.IsSuccess()) + throw Exception(ErrorCodes::S3_ERROR, "Object {} from bucket {} disappeared immediately after upload, it's a bug in S3 or S3 API.", key, bucket); + } } void WriteBufferFromS3::createMultipartUpload() diff --git a/src/IO/examples/CMakeLists.txt b/src/IO/examples/CMakeLists.txt index de03801b41d..b42aa1a4f96 100644 --- a/src/IO/examples/CMakeLists.txt +++ b/src/IO/examples/CMakeLists.txt @@ -37,12 +37,6 @@ target_link_libraries (read_write_int PRIVATE clickhouse_common_io) clickhouse_add_executable (o_direct_and_dirty_pages o_direct_and_dirty_pages.cpp) target_link_libraries (o_direct_and_dirty_pages PRIVATE clickhouse_common_io) -clickhouse_add_executable (hashing_write_buffer hashing_write_buffer.cpp) -target_link_libraries (hashing_write_buffer PRIVATE clickhouse_common_io) - -clickhouse_add_executable (hashing_read_buffer hashing_read_buffer.cpp) -target_link_libraries (hashing_read_buffer PRIVATE clickhouse_common_io) - clickhouse_add_executable (io_operators io_operators.cpp) target_link_libraries (io_operators PRIVATE clickhouse_common_io) diff --git a/src/IO/examples/hashing_buffer.h b/src/IO/examples/hashing_buffer.h deleted file mode 100644 index 078390658f6..00000000000 --- 
a/src/IO/examples/hashing_buffer.h +++ /dev/null @@ -1,21 +0,0 @@ -#pragma once -#include -#include - -#define FAIL(msg) do { std::cout << msg; exit(1); } while (false) - - -static CityHash_v1_0_2::uint128 referenceHash(const char * data, size_t len) -{ - const size_t block_size = DBMS_DEFAULT_HASHING_BLOCK_SIZE; - CityHash_v1_0_2::uint128 state(0, 0); - size_t pos; - - for (pos = 0; pos + block_size <= len; pos += block_size) - state = CityHash_v1_0_2::CityHash128WithSeed(data + pos, block_size, state); - - if (pos < len) - state = CityHash_v1_0_2::CityHash128WithSeed(data + pos, len - pos, state); - - return state; -} diff --git a/src/IO/examples/hashing_read_buffer.cpp b/src/IO/examples/hashing_read_buffer.cpp deleted file mode 100644 index 67b3d61b1dc..00000000000 --- a/src/IO/examples/hashing_read_buffer.cpp +++ /dev/null @@ -1,69 +0,0 @@ -#include -#include -#include -#include "hashing_buffer.h" -#include -#include - - -static void test(size_t data_size) -{ - pcg64 rng; - - std::vector vec(data_size); - char * data = vec.data(); - - for (size_t i = 0; i < data_size; ++i) - data[i] = rng() & 255; - - CityHash_v1_0_2::uint128 reference = referenceHash(data, data_size); - - std::vector block_sizes = {56, 128, 513, 2048, 3055, 4097, 4096}; - for (size_t read_buffer_block_size : block_sizes) - { - std::cout << "block size " << read_buffer_block_size << std::endl; - std::stringstream io; // STYLE_CHECK_ALLOW_STD_STRING_STREAM - io.exceptions(std::ios::failbit); - DB::WriteBufferFromOStream out_impl(io); - DB::HashingWriteBuffer out(out_impl); - out.write(data, data_size); - out.next(); - - DB::ReadBufferFromIStream source(io, read_buffer_block_size); - DB::HashingReadBuffer buf(source); - - std::vector read_buf(data_size); - buf.read(read_buf.data(), data_size); - - bool failed_to_read = false; - for (size_t i = 0; i < data_size; ++i) - if (read_buf[i] != vec[i]) - failed_to_read = true; - - if (failed_to_read) - { - std::cout.write(data, data_size); - std::cout << std::endl; - std::cout.write(read_buf.data(), data_size); - std::cout << std::endl; - FAIL("Fail to read data"); - } - - if (buf.getHash() != reference) - FAIL("failed on data size " << data_size << " reading by blocks of size " << read_buffer_block_size); - if (buf.getHash() != out.getHash()) - FAIL("Hash of HashingReadBuffer doesn't match with hash of HashingWriteBuffer on data size " << data_size << " reading by blocks of size " << read_buffer_block_size); - } -} - -int main() -{ - test(5); - test(100); - test(2048); - test(2049); - test(100000); - test(1 << 17); - - return 0; -} diff --git a/src/IO/examples/hashing_write_buffer.cpp b/src/IO/examples/hashing_write_buffer.cpp deleted file mode 100644 index 461c39139c4..00000000000 --- a/src/IO/examples/hashing_write_buffer.cpp +++ /dev/null @@ -1,86 +0,0 @@ -#include -#include -#include - -#include "hashing_buffer.h" - -static void test(size_t data_size) -{ - pcg64 rng; - - std::vector vec(data_size); - char * data = vec.data(); - - for (size_t i = 0; i < data_size; ++i) - data[i] = rng() & 255; - - CityHash_v1_0_2::uint128 reference = referenceHash(data, data_size); - - DB::WriteBufferFromFile sink("/dev/null", 1 << 16); - - { - DB::HashingWriteBuffer buf(sink); - - for (size_t pos = 0; pos < data_size;) - { - size_t len = std::min(static_cast(rng() % 10000 + 1), data_size - pos); - buf.write(data + pos, len); - buf.next(); - pos += len; - } - - if (buf.getHash() != reference) - FAIL("failed on data size " << data_size << " writing rngom chunks of up to 10000 bytes"); - } - - 
{ - DB::HashingWriteBuffer buf(sink); - - for (size_t pos = 0; pos < data_size;) - { - size_t len = std::min(static_cast(rng() % 5 + 1), data_size - pos); - buf.write(data + pos, len); - buf.next(); - pos += len; - } - - if (buf.getHash() != reference) - FAIL("failed on data size " << data_size << " writing rngom chunks of up to 5 bytes"); - } - - { - DB::HashingWriteBuffer buf(sink); - - for (size_t pos = 0; pos < data_size;) - { - size_t len = std::min(static_cast(2048 + rng() % 3 - 1), data_size - pos); - buf.write(data + pos, len); - buf.next(); - pos += len; - } - - if (buf.getHash() != reference) - FAIL("failed on data size " << data_size << " writing rngom chunks of 2048 +-1 bytes"); - } - - { - DB::HashingWriteBuffer buf(sink); - - buf.write(data, data_size); - - if (buf.getHash() != reference) - FAIL("failed on data size " << data_size << " writing all at once"); - } -} - -int main() -{ - test(5); - test(100); - test(2048); - test(2049); - test(100000); - test(1 << 17); - - return 0; -} diff --git a/src/IO/examples/parse_int_perf.cpp b/src/IO/examples/parse_int_perf.cpp index f2a04dc1541..a980cba8896 100644 --- a/src/IO/examples/parse_int_perf.cpp +++ b/src/IO/examples/parse_int_perf.cpp @@ -1,5 +1,6 @@ #include #include +#include #include @@ -7,7 +8,6 @@ #include #include #include -#include #include @@ -27,6 +27,8 @@ static UInt64 rdtsc() int main(int argc, char ** argv) { + pcg64 rng; + try { if (argc < 2) @@ -47,7 +49,7 @@ int main(int argc, char ** argv) Stopwatch watch; for (size_t i = 0; i < n; ++i) - data[i] = lrand48();// / lrand48();// ^ (lrand48() << 24) ^ (lrand48() << 48); + data[i] = rng(); watch.stop(); std::cerr << std::fixed << std::setprecision(2) diff --git a/src/IO/examples/var_uint.cpp b/src/IO/examples/var_uint.cpp index 8ed81162685..65e1f0495d3 100644 --- a/src/IO/examples/var_uint.cpp +++ b/src/IO/examples/var_uint.cpp @@ -8,32 +8,8 @@ #include -static void parse_trash_string_as_uint_must_fail(const std::string & str) -{ - using namespace DB; - - unsigned x = 0xFF; - - try - { - x = parse(str); - } - catch (...) 
- { - /// Ok - return; - } - - std::cerr << "Parsing must fail, but finished successfully x=" << x; - exit(-1); -} - - int main(int argc, char ** argv) { - parse_trash_string_as_uint_must_fail("trash"); - parse_trash_string_as_uint_must_fail("-1"); - if (argc != 2) { std::cerr << "Usage: " << std::endl diff --git a/src/IO/tests/gtest_hadoop_snappy_decoder.cpp b/src/IO/tests/gtest_hadoop_snappy_decoder.cpp index f681e8e61e1..4db0deac08e 100644 --- a/src/IO/tests/gtest_hadoop_snappy_decoder.cpp +++ b/src/IO/tests/gtest_hadoop_snappy_decoder.cpp @@ -60,7 +60,8 @@ TEST(HadoopSnappyDecoder, repeatNeedMoreInput) String output; WriteBufferFromString out(output); copyData(read_buffer, out); + out.finalize(); UInt128 hashcode = sipHash128(output.c_str(), output.size()); String hashcode_str = getHexUIntLowercase(hashcode); - ASSERT_EQ(hashcode_str, "593afe14f61866915cc00b8c7bd86046"); + ASSERT_EQ(hashcode_str, "673e5b065186cec146789451c2a8f703"); } diff --git a/src/IO/tests/gtest_memory_resize.cpp b/src/IO/tests/gtest_memory_resize.cpp new file mode 100644 index 00000000000..8619419a47a --- /dev/null +++ b/src/IO/tests/gtest_memory_resize.cpp @@ -0,0 +1,328 @@ +#include +#include +#include +#include + +#define EXPECT_THROW_ERROR_CODE(statement, expected_exception, expected_code) \ + EXPECT_THROW( \ + try \ + { \ + statement; \ + } \ + catch (const expected_exception & e) \ + { \ + EXPECT_EQ(expected_code, e.code()); \ + throw; \ + } \ + , expected_exception) + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ARGUMENT_OUT_OF_BOUND; + extern const int LOGICAL_ERROR; + extern const int CANNOT_ALLOCATE_MEMORY; +} + +} + +using namespace DB; + +class DummyAllocator +{ + void * dummy_address = reinterpret_cast(1); + +public: + void * alloc(size_t size, size_t /*alignment*/ = 0) + { + checkSize(size); + if (size) + return dummy_address; + else + return nullptr; + } + + void * realloc(void * /*buf*/, size_t /*old_size*/, size_t new_size, size_t /*alignment*/ = 0) + { + checkSize(new_size); + return dummy_address; + } + + void free([[maybe_unused]] void * buf, size_t /*size*/) + { + assert(buf == dummy_address); + } + + // the same check as in Common/Allocator.h + void static checkSize(size_t size) + { + /// More obvious exception in case of possible overflow (instead of just "Cannot mmap"). + if (size >= 0x8000000000000000ULL) + throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Too large size ({}) passed to allocator. 
It indicates an error.", size); + } +}; + +TEST(MemoryResizeTest, SmallInitAndSmallResize) +{ + { + auto memory = Memory(0); + ASSERT_EQ(memory.m_data, nullptr); + ASSERT_EQ(memory.m_capacity, 0); + ASSERT_EQ(memory.m_size, 0); + + memory.resize(0); + ASSERT_EQ(memory.m_data, nullptr); + ASSERT_EQ(memory.m_capacity, 0); + ASSERT_EQ(memory.m_size, 0); + + memory.resize(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + } + + { + auto memory = Memory(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + + memory.resize(0); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 0); + + memory.resize(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + } +} + +TEST(MemoryResizeTest, SmallInitAndBigResizeOverflowWhenPadding) +{ + { + auto memory = Memory(0); + ASSERT_EQ(memory.m_data, nullptr); + ASSERT_EQ(memory.m_capacity, 0); + ASSERT_EQ(memory.m_size, 0); + + EXPECT_THROW_ERROR_CODE(memory.resize(std::numeric_limits::max()), Exception, ErrorCodes::ARGUMENT_OUT_OF_BOUND); + ASSERT_EQ(memory.m_data, nullptr); // state is intact after exception + ASSERT_EQ(memory.m_size, 0); + ASSERT_EQ(memory.m_capacity, 0); + + memory.resize(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + + memory.resize(2); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 17); + ASSERT_EQ(memory.m_size, 2); + + EXPECT_THROW_ERROR_CODE(memory.resize(std::numeric_limits::max()), Exception, ErrorCodes::ARGUMENT_OUT_OF_BOUND); + ASSERT_TRUE(memory.m_data); // state is intact after exception + ASSERT_EQ(memory.m_capacity, 17); + ASSERT_EQ(memory.m_size, 2); + + memory.resize(0x8000000000000000ULL-16); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 0x8000000000000000ULL - 1); + ASSERT_EQ(memory.m_size, 0x8000000000000000ULL - 16); + +#ifndef ABORT_ON_LOGICAL_ERROR + EXPECT_THROW_ERROR_CODE(memory.resize(0x8000000000000000ULL-15), Exception, ErrorCodes::LOGICAL_ERROR); + ASSERT_TRUE(memory.m_data); // state is intact after exception + ASSERT_EQ(memory.m_capacity, 0x8000000000000000ULL - 1); + ASSERT_EQ(memory.m_size, 0x8000000000000000ULL - 16); +#endif + } + + { + auto memory = Memory(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + + EXPECT_THROW_ERROR_CODE(memory.resize(std::numeric_limits::max()), Exception, ErrorCodes::ARGUMENT_OUT_OF_BOUND); + ASSERT_TRUE(memory.m_data); // state is intact after exception + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + + memory.resize(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + +#ifndef ABORT_ON_LOGICAL_ERROR + EXPECT_THROW_ERROR_CODE(memory.resize(0x8000000000000000ULL-15), Exception, ErrorCodes::LOGICAL_ERROR); + ASSERT_TRUE(memory.m_data); // state is intact after exception + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); +#endif + } +} + + +TEST(MemoryResizeTest, BigInitAndSmallResizeOverflowWhenPadding) +{ + { + EXPECT_THROW_ERROR_CODE( + { + auto memory = Memory(std::numeric_limits::max()); + } + , Exception + , ErrorCodes::ARGUMENT_OUT_OF_BOUND); + } + + { + EXPECT_THROW_ERROR_CODE( + { + auto memory = Memory(std::numeric_limits::max() - 1); + } + , Exception + , ErrorCodes::ARGUMENT_OUT_OF_BOUND); + } + + { + EXPECT_THROW_ERROR_CODE( + { + auto memory = 
Memory(std::numeric_limits::max() - 10); + } + , Exception + , ErrorCodes::ARGUMENT_OUT_OF_BOUND); + } + +#ifndef ABORT_ON_LOGICAL_ERROR + { + EXPECT_THROW_ERROR_CODE( + { + auto memory = Memory(std::numeric_limits::max() - 15); + } + , Exception + , ErrorCodes::LOGICAL_ERROR); + } + + { + EXPECT_THROW_ERROR_CODE( + { + auto memory = Memory(0x8000000000000000ULL - 15); + } + , Exception + , ErrorCodes::LOGICAL_ERROR); + } +#endif + + { + auto memory = Memory(0x8000000000000000ULL - 16); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 0x8000000000000000ULL - 1); + ASSERT_EQ(memory.m_size, 0x8000000000000000ULL - 16); + + memory.resize(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 0x8000000000000000ULL - 1); + ASSERT_EQ(memory.m_size, 1); + } +} + +TEST(MemoryResizeTest, AlignmentWithRealAllocator) +{ + { + auto memory = Memory<>(0, 3); // not the power of 2 but less than MALLOC_MIN_ALIGNMENT 8 so user-defined alignment is ignored at Allocator + ASSERT_EQ(memory.m_data, nullptr); + ASSERT_EQ(memory.m_capacity, 0); + ASSERT_EQ(memory.m_size, 0); + + memory.resize(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + + memory.resize(2); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 17); + ASSERT_EQ(memory.m_size, 2); + + memory.resize(3); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 18); + ASSERT_EQ(memory.m_size, 3); + + memory.resize(4); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 19); + ASSERT_EQ(memory.m_size, 4); + + memory.resize(0); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 19); + ASSERT_EQ(memory.m_size, 0); + + memory.resize(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 19); + ASSERT_EQ(memory.m_size, 1); + } + +#if !defined(ADDRESS_SANITIZER) && !defined(THREAD_SANITIZER) && !defined(MEMORY_SANITIZER) && !defined(UNDEFINED_BEHAVIOR_SANITIZER) + { + auto memory = Memory<>(0, 10); // not the power of 2 + ASSERT_EQ(memory.m_data, nullptr); + ASSERT_EQ(memory.m_capacity, 0); + ASSERT_EQ(memory.m_size, 0); + + EXPECT_THROW_ERROR_CODE(memory.resize(1), ErrnoException, ErrorCodes::CANNOT_ALLOCATE_MEMORY); + ASSERT_EQ(memory.m_data, nullptr); // state is intact after exception + ASSERT_EQ(memory.m_capacity, 0); + ASSERT_EQ(memory.m_size, 0); + } +#endif + + { + auto memory = Memory<>(0, 32); + ASSERT_EQ(memory.m_data, nullptr); + ASSERT_EQ(memory.m_capacity, 0); + ASSERT_EQ(memory.m_size, 0); + + memory.resize(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + + memory.resize(32); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 47); + ASSERT_EQ(memory.m_size, 32); + } +} + +TEST(MemoryResizeTest, SomeAlignmentOverflowWhenAlignment) +{ + { + auto memory = Memory(0, 31); + ASSERT_EQ(memory.m_data, nullptr); + ASSERT_EQ(memory.m_capacity, 0); + ASSERT_EQ(memory.m_size, 0); + + memory.resize(0); + ASSERT_EQ(memory.m_data, nullptr); + ASSERT_EQ(memory.m_capacity, 0); + ASSERT_EQ(memory.m_size, 0); + + memory.resize(1); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + + EXPECT_THROW_ERROR_CODE(memory.resize(std::numeric_limits::max()), Exception, ErrorCodes::ARGUMENT_OUT_OF_BOUND); + ASSERT_TRUE(memory.m_data); // state is intact after exception + ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_size, 1); + } + +} diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 
d5e1f332b50..3dc855b93ff 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1194,7 +1194,7 @@ ActionsDAGPtr ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second) if (it == first_result.end() || it->second.empty()) { if (first.project_input) - throw Exception(ErrorCodes::LOGICAL_ERROR, + throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Cannot find column {} in ActionsDAG result", input_node->result_name); first.inputs.push_back(input_node); diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 6e101005599..3340170f71b 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -23,12 +23,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include @@ -37,6 +39,8 @@ namespace ProfileEvents extern const Event ExternalAggregationWritePart; extern const Event ExternalAggregationCompressedBytes; extern const Event ExternalAggregationUncompressedBytes; + extern const Event ExternalProcessingCompressedBytesTotal; + extern const Event ExternalProcessingUncompressedBytesTotal; extern const Event AggregationPreallocatedElementsInHashTables; extern const Event AggregationHashTablesInitializedAsTwoLevel; extern const Event OverflowThrow; @@ -44,6 +48,11 @@ namespace ProfileEvents extern const Event OverflowAny; } +namespace CurrentMetrics +{ + extern const Metric TemporaryFilesForAggregation; +} + namespace { /** Collects observed HashMap-s sizes to avoid redundant intermediate resizes. @@ -1469,40 +1478,26 @@ bool Aggregator::executeOnBlock(Columns columns, && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; - - std::string tmp_path = params.tmp_volume->getDisk()->getPath(); - - // enoughSpaceInDirectory() is not enough to make it right, since - // another process (or another thread of aggregator) can consume all - // space. - // - // But true reservation (IVolume::reserve()) cannot be used here since - // current_memory_usage does not takes compression into account and - // will reserve way more that actually will be used. - // - // Hence let's do a simple check. - if (!enoughSpaceInDirectory(tmp_path, size)) - throw Exception("Not enough space for external aggregation in " + tmp_path, ErrorCodes::NOT_ENOUGH_SPACE); - - writeToTemporaryFile(result, tmp_path); + writeToTemporaryFile(result, size); } return true; } -void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, const String & tmp_path) const +void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, size_t max_temp_file_size) const { Stopwatch watch; size_t rows = data_variants.size(); - auto file = createTemporaryFile(tmp_path); - const std::string & path = file->path(); + auto file = createTempFile(max_temp_file_size); + + const auto & path = file->path(); WriteBufferFromFile file_buf(path); CompressedWriteBuffer compressed_buf(file_buf); NativeWriter block_out(compressed_buf, DBMS_TCP_PROTOCOL_VERSION, getHeader(false)); - LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}.", path); + LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", path); ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart); /// Flush only two-level data and possibly overflow data. 
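
The comment preserved across this refactoring (see the createTempFile hunk just below) explains why the spill path only does a best-effort free-space check rather than a true IVolume::reserve(): current_memory_usage ignores compression, so a reservation would be far larger than what is actually written. A minimal sketch of such a check, assuming std::filesystem in place of ClickHouse's enoughSpaceInDirectory; the function name is hypothetical.

#include <cstdint>
#include <filesystem>
#include <stdexcept>
#include <string>

/// Best-effort pre-flight check before spilling aggregation state to disk.
/// Another process can still consume the space between this check and the
/// actual write; that race is accepted by design, as the comment above notes.
void checkSpaceForSpill(const std::string & dir, std::uint64_t bytes_needed)
{
    const std::filesystem::space_info info = std::filesystem::space(dir);
    if (info.available < bytes_needed)
        throw std::runtime_error("Not enough space for external aggregation in '" + dir + "'");
}
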
@@ -1545,6 +1540,8 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co ProfileEvents::increment(ProfileEvents::ExternalAggregationCompressedBytes, compressed_bytes); ProfileEvents::increment(ProfileEvents::ExternalAggregationUncompressedBytes, uncompressed_bytes); + ProfileEvents::increment(ProfileEvents::ExternalProcessingCompressedBytesTotal, compressed_bytes); + ProfileEvents::increment(ProfileEvents::ExternalProcessingUncompressedBytesTotal, uncompressed_bytes); LOG_DEBUG(log, "Written part in {:.3f} sec., {} rows, {} uncompressed, {} compressed," @@ -1563,10 +1560,22 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co } -void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants) const +TemporaryFileOnDiskHolder Aggregator::createTempFile(size_t max_temp_file_size) const { - String tmp_path = params.tmp_volume->getDisk()->getPath(); - return writeToTemporaryFile(data_variants, tmp_path); + auto file = std::make_unique(params.tmp_volume->getDisk(), CurrentMetrics::TemporaryFilesForAggregation); + + // enoughSpaceInDirectory() is not enough to make it right, since + // another process (or another thread of aggregator) can consume all + // space. + // + // But true reservation (IVolume::reserve()) cannot be used here since + // current_memory_usage does not takes compression into account and + // will reserve way more that actually will be used. + // + // Hence let's do a simple check. + if (max_temp_file_size > 0 && !enoughSpaceInDirectory(file->getPath(), max_temp_file_size)) + throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space for external aggregation in '{}'", file->path()); + return file; } @@ -2831,22 +2840,7 @@ bool Aggregator::mergeOnBlock(Block block, AggregatedDataVariants & result, bool && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; - - std::string tmp_path = params.tmp_volume->getDisk()->getPath(); - - // enoughSpaceInDirectory() is not enough to make it right, since - // another process (or another thread of aggregator) can consume all - // space. - // - // But true reservation (IVolume::reserve()) cannot be used here since - // current_memory_usage does not takes compression into account and - // will reserve way more that actually will be used. - // - // Hence let's do a simple check. - if (!enoughSpaceInDirectory(tmp_path, size)) - throw Exception("Not enough space for external aggregation in " + tmp_path, ErrorCodes::NOT_ENOUGH_SPACE); - - writeToTemporaryFile(result, tmp_path); + writeToTemporaryFile(result, size); } return true; diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 3e8b25c1a8c..ce63c24969a 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -23,6 +23,7 @@ #include #include +#include #include #include @@ -1058,14 +1059,15 @@ public: std::vector convertBlockToTwoLevel(const Block & block) const; /// For external aggregation. 
- void writeToTemporaryFile(AggregatedDataVariants & data_variants, const String & tmp_path) const; - void writeToTemporaryFile(AggregatedDataVariants & data_variants) const; + void writeToTemporaryFile(AggregatedDataVariants & data_variants, size_t max_temp_file_size = 0) const; + + TemporaryFileOnDiskHolder createTempFile(size_t max_temp_file_size) const; bool hasTemporaryFiles() const { return !temporary_files.empty(); } struct TemporaryFiles { - std::vector> files; + std::vector files; size_t sum_size_uncompressed = 0; size_t sum_size_compressed = 0; mutable std::mutex mutex; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 55ac93f7e7b..3d55a09f989 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -664,8 +664,8 @@ VolumePtr Context::setTemporaryStorage(const String & path, const String & polic { StoragePolicyPtr tmp_policy = getStoragePolicySelector(lock)->get(policy_name); if (tmp_policy->getVolumes().size() != 1) - throw Exception("Policy " + policy_name + " is used temporary files, such policy should have exactly one volume", - ErrorCodes::NO_ELEMENTS_IN_CONFIG); + throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, + "Policy '{}' is used for temporary files, such policy should have exactly one volume", policy_name); shared->tmp_volume = tmp_policy->getVolume(0); } @@ -3451,7 +3451,6 @@ ReadSettings Context::getReadSettings() const res.remote_fs_read_max_backoff_ms = settings.remote_fs_read_max_backoff_ms; res.remote_fs_read_backoff_max_tries = settings.remote_fs_read_backoff_max_tries; res.enable_filesystem_cache = settings.enable_filesystem_cache; - res.filesystem_cache_max_wait_sec = settings.filesystem_cache_max_wait_sec; res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache; res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log; res.enable_filesystem_cache_on_lower_level = settings.enable_filesystem_cache_on_lower_level; @@ -3461,6 +3460,13 @@ ReadSettings Context::getReadSettings() const res.remote_read_min_bytes_for_seek = settings.remote_read_min_bytes_for_seek; + /// Zero read buffer will not make progress. 
+ if (!settings.max_read_buffer_size) + { + throw Exception(ErrorCodes::INVALID_SETTING_VALUE, + "Invalid value '{}' for max_read_buffer_size", settings.max_read_buffer_size); + } + res.local_fs_buffer_size = settings.max_read_buffer_size; res.remote_fs_buffer_size = settings.max_read_buffer_size; res.direct_io_threshold = settings.min_bytes_to_use_direct_io; diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 1ad3d0057cd..67fb256b1c9 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include "config_core.h" @@ -32,6 +33,7 @@ # include #endif + namespace CurrentMetrics { extern const Metric TablesToDropQueueSize; @@ -255,6 +257,8 @@ DatabaseAndTable DatabaseCatalog::getTableImpl( ContextPtr context_, std::optional * exception) const { + checkStackSize(); + if (!table_id) { if (exception) diff --git a/src/Interpreters/ExpressionJIT.cpp b/src/Interpreters/ExpressionJIT.cpp index a179c4d8bf6..c37d4d5b6a2 100644 --- a/src/Interpreters/ExpressionJIT.cpp +++ b/src/Interpreters/ExpressionJIT.cpp @@ -113,14 +113,14 @@ public: const auto & null_map_column = nullable_column->getNullMapColumn(); auto nested_column_raw_data = nested_column.getRawData(); - __msan_unpoison(nested_column_raw_data.data, nested_column_raw_data.size); + __msan_unpoison(nested_column_raw_data.data(), nested_column_raw_data.size()); auto null_map_column_raw_data = null_map_column.getRawData(); - __msan_unpoison(null_map_column_raw_data.data, null_map_column_raw_data.size); + __msan_unpoison(null_map_column_raw_data.data(), null_map_column_raw_data.size()); } else { - __msan_unpoison(result_column->getRawData().data, result_column->getRawData().size); + __msan_unpoison(result_column->getRawData().data(), result_column->getRawData().size()); } #endif diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 4c7823ddc4e..7b6066575ae 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -326,7 +326,7 @@ BlockIO InterpreterInsertQuery::execute() if (!query.table_function) getContext()->checkAccess(AccessType::INSERT, query.table_id, query_sample_block.getNames()); - if (query.select && settings.parallel_distributed_insert_select) + if (query.select && table->isRemote() && settings.parallel_distributed_insert_select) // Distributed INSERT SELECT distributed_pipeline = table->distributedWrite(query, getContext()); diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 9b87e4dbbef..76f199d8e41 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -2198,7 +2198,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc query_info.syntax_analyzer_result); } } - else + else if (optimize_aggregation_in_order) { if (query_info.projection) { diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index d9310169ac7..43bb15b70bb 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -573,7 +573,9 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, database->detachTable(system_context, replica.table_name); } + UUID uuid = table->getStorageID().uuid; table.reset(); + database->waitDetachedTableNotInUse(uuid); /// Attach actions /// 
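The `waitDetachedTableNotInUse(uuid)` call added above closes a race: `table.reset()` drops only this thread's reference, while other queries may still hold the storage alive. A hedged sketch of the underlying idea, with hypothetical names rather than DatabaseCatalog's real API:

```cpp
#include <condition_variable>
#include <memory>
#include <mutex>

// Block until every shared_ptr to the detached table is gone, so the
// subsequent re-attach cannot observe a still-live storage object.
struct DetachedTableTracker
{
    std::mutex mutex;
    std::condition_variable cv;
    std::weak_ptr<void> storage;  // set when the table is detached

    // The storage's teardown path must call this (e.g. via a custom deleter).
    void onLastReferenceDropped() { cv.notify_all(); }

    void waitNotInUse()
    {
        std::unique_lock lock(mutex);
        cv.wait(lock, [this] { return storage.expired(); });
    }
};
```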
getCreateTableQuery must return canonical CREATE query representation, there are no need for AST postprocessing diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index 9d2ab40bf76..353ab84674c 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -47,11 +47,11 @@ ColumnData getColumnData(const IColumn * column) if (const auto * nullable = typeid_cast(column)) { - result.null_data = nullable->getNullMapColumn().getRawData().data; + result.null_data = nullable->getNullMapColumn().getRawData().data(); column = & nullable->getNestedColumn(); } - result.data = column->getRawData().data; + result.data = column->getRawData().data(); return result; } diff --git a/src/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp index f1dcff70c4c..5a5a057aedc 100644 --- a/src/Interpreters/MergeJoin.cpp +++ b/src/Interpreters/MergeJoin.cpp @@ -735,7 +735,7 @@ void MergeJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) /// Back thread even with no data. We have some unfinished data in buffer. if (!not_processed && left_blocks_buffer) - not_processed = std::make_shared(NotProcessed{{}, 0, 0, 0}); + not_processed = std::make_shared(NotProcessed{{}, 0, 0, 0, 0}); if (needConditionJoinColumn()) block.erase(deriveTempName(mask_column_name_left, JoinTableSide::Left)); @@ -759,6 +759,7 @@ void MergeJoin::joinSortedBlock(Block & block, ExtraBlockPtr & not_processed) { auto & continuation = static_cast(*not_processed); left_cursor.nextN(continuation.left_position); + left_key_tail = continuation.left_key_tail; skip_right = continuation.right_position; starting_right_block = continuation.right_block; not_processed.reset(); @@ -778,7 +779,10 @@ void MergeJoin::joinSortedBlock(Block & block, ExtraBlockPtr & not_processed) if (intersection < 0) break; /// (left) ... (right) if (intersection > 0) + { + skip_right = 0; continue; /// (right) ... (left) + } } /// Use skip_right as ref. It would be updated in join. @@ -787,7 +791,7 @@ void MergeJoin::joinSortedBlock(Block & block, ExtraBlockPtr & not_processed) if (!leftJoin(left_cursor, block, right_block, left_columns, right_columns, left_key_tail)) { not_processed = extraBlock(block, std::move(left_columns), std::move(right_columns), - left_cursor.position(), skip_right, i); + left_cursor.position(), left_key_tail, skip_right, i); return; } } @@ -811,7 +815,10 @@ void MergeJoin::joinSortedBlock(Block & block, ExtraBlockPtr & not_processed) if (intersection < 0) break; /// (left) ... (right) if (intersection > 0) + { + skip_right = 0; continue; /// (right) ... (left) + } } /// Use skip_right as ref. It would be updated in join. @@ -822,7 +829,7 @@ void MergeJoin::joinSortedBlock(Block & block, ExtraBlockPtr & not_processed) if (!allInnerJoin(left_cursor, block, right_block, left_columns, right_columns, left_key_tail)) { not_processed = extraBlock(block, std::move(left_columns), std::move(right_columns), - left_cursor.position(), skip_right, i); + left_cursor.position(), left_key_tail, skip_right, i); return; } } @@ -884,7 +891,7 @@ bool MergeJoin::leftJoin(MergeJoinCursor & left_cursor, const Block & left_block { right_cursor.nextN(range.right_length); right_block_info.skip = right_cursor.position(); - left_cursor.nextN(range.left_length); + left_key_tail = range.left_length; return false; } } @@ -991,15 +998,15 @@ void MergeJoin::addRightColumns(Block & block, MutableColumns && right_columns) /// Split block into processed (result) and not processed. 
Not processed block would be joined next time. template ExtraBlockPtr MergeJoin::extraBlock(Block & processed, MutableColumns && left_columns, MutableColumns && right_columns, - size_t left_position [[maybe_unused]], size_t right_position [[maybe_unused]], - size_t right_block_number [[maybe_unused]]) + size_t left_position [[maybe_unused]], size_t left_key_tail [[maybe_unused]], + size_t right_position [[maybe_unused]], size_t right_block_number [[maybe_unused]]) { ExtraBlockPtr not_processed; if constexpr (is_all) { not_processed = std::make_shared( - NotProcessed{{processed.cloneEmpty()}, left_position, right_position, right_block_number}); + NotProcessed{{processed.cloneEmpty()}, left_position, left_key_tail, right_position, right_block_number}); not_processed->block.swap(processed); changeLeftColumns(processed, std::move(left_columns)); diff --git a/src/Interpreters/MergeJoin.h b/src/Interpreters/MergeJoin.h index 99bd7deffe2..3ea15d14240 100644 --- a/src/Interpreters/MergeJoin.h +++ b/src/Interpreters/MergeJoin.h @@ -45,6 +45,7 @@ private: struct NotProcessed : public ExtraBlock { size_t left_position; + size_t left_key_tail; size_t right_position; size_t right_block; }; @@ -123,7 +124,8 @@ private: template ExtraBlockPtr extraBlock(Block & processed, MutableColumns && left_columns, MutableColumns && right_columns, - size_t left_position, size_t right_position, size_t right_block_number); + size_t left_position, size_t left_key_tail, size_t right_position, + size_t right_block_number); void mergeRightBlocks(); diff --git a/src/Interpreters/SortedBlocksWriter.cpp b/src/Interpreters/SortedBlocksWriter.cpp index c2a6f513224..0acb056690f 100644 --- a/src/Interpreters/SortedBlocksWriter.cpp +++ b/src/Interpreters/SortedBlocksWriter.cpp @@ -7,27 +7,49 @@ #include #include #include +#include +namespace ProfileEvents +{ + extern const Event ExternalJoinWritePart; + extern const Event ExternalJoinMerge; + extern const Event ExternalJoinCompressedBytes; + extern const Event ExternalJoinUncompressedBytes; + extern const Event ExternalProcessingCompressedBytesTotal; + extern const Event ExternalProcessingUncompressedBytesTotal; +} + +namespace CurrentMetrics +{ + extern const Metric TemporaryFilesForJoin; +} + namespace DB { namespace { -std::unique_ptr flushToFile(const String & tmp_path, const Block & header, QueryPipelineBuilder pipeline, const String & codec) +TemporaryFileOnDiskHolder flushToFile(const DiskPtr & disk, const Block & header, QueryPipelineBuilder pipeline, const String & codec) { - auto tmp_file = createTemporaryFile(tmp_path); + auto tmp_file = std::make_unique(disk, CurrentMetrics::TemporaryFilesForJoin); + auto write_stat = TemporaryFileStream::write(tmp_file->getPath(), header, std::move(pipeline), codec); - TemporaryFileStream::write(tmp_file->path(), header, std::move(pipeline), codec); + ProfileEvents::increment(ProfileEvents::ExternalProcessingCompressedBytesTotal, write_stat.compressed_bytes); + ProfileEvents::increment(ProfileEvents::ExternalProcessingUncompressedBytesTotal, write_stat.uncompressed_bytes); + + ProfileEvents::increment(ProfileEvents::ExternalJoinCompressedBytes, write_stat.compressed_bytes); + ProfileEvents::increment(ProfileEvents::ExternalJoinUncompressedBytes, write_stat.uncompressed_bytes); + ProfileEvents::increment(ProfileEvents::ExternalJoinWritePart); return tmp_file; } -SortedBlocksWriter::SortedFiles flushToManyFiles(const String & tmp_path, const Block & header, QueryPipelineBuilder builder, +SortedBlocksWriter::SortedFiles 
flushToManyFiles(const DiskPtr & disk, const Block & header, QueryPipelineBuilder builder, const String & codec, std::function callback = [](const Block &){}) { - std::vector> files; + std::vector files; auto pipeline = QueryPipelineBuilder::getPipeline(std::move(builder)); PullingPipelineExecutor executor(pipeline); @@ -42,7 +64,7 @@ SortedBlocksWriter::SortedFiles flushToManyFiles(const String & tmp_path, const QueryPipelineBuilder one_block_pipeline; Chunk chunk(block.getColumns(), block.rows()); one_block_pipeline.init(Pipe(std::make_shared(block.cloneEmpty(), std::move(chunk)))); - auto tmp_file = flushToFile(tmp_path, header, std::move(one_block_pipeline), codec); + auto tmp_file = flushToFile(disk, header, std::move(one_block_pipeline), codec); files.emplace_back(std::move(tmp_file)); } @@ -116,8 +138,6 @@ void SortedBlocksWriter::insert(Block && block) SortedBlocksWriter::TmpFilePtr SortedBlocksWriter::flush(const BlocksList & blocks) const { - const std::string path = getPath(); - Pipes pipes; pipes.reserve(blocks.size()); for (const auto & block : blocks) @@ -142,7 +162,7 @@ SortedBlocksWriter::TmpFilePtr SortedBlocksWriter::flush(const BlocksList & bloc pipeline.addTransform(std::move(transform)); } - return flushToFile(path, sample_block, std::move(pipeline), codec); + return flushToFile(volume->getDisk(), sample_block, std::move(pipeline), codec); } SortedBlocksWriter::PremergedFiles SortedBlocksWriter::premerge() @@ -197,7 +217,7 @@ SortedBlocksWriter::PremergedFiles SortedBlocksWriter::premerge() pipeline.addTransform(std::move(transform)); } - new_files.emplace_back(flushToFile(getPath(), sample_block, std::move(pipeline), codec)); + new_files.emplace_back(flushToFile(volume->getDisk(), sample_block, std::move(pipeline), codec)); } } @@ -220,6 +240,7 @@ SortedBlocksWriter::SortedFiles SortedBlocksWriter::finishMerge(std::function 1) { + ProfileEvents::increment(ProfileEvents::ExternalJoinMerge); auto transform = std::make_shared( pipeline.getHeader(), pipeline.getNumStreams(), @@ -230,7 +251,7 @@ SortedBlocksWriter::SortedFiles SortedBlocksWriter::finishMerge(std::functiongetDisk(), sample_block, std::move(pipeline), codec, callback); } Pipe SortedBlocksWriter::streamFromFile(const TmpFilePtr & file) const @@ -238,11 +259,6 @@ Pipe SortedBlocksWriter::streamFromFile(const TmpFilePtr & file) const return Pipe(std::make_shared(file->path(), materializeBlock(sample_block))); } -String SortedBlocksWriter::getPath() const -{ - return volume->getDisk()->getPath(); -} - Block SortedBlocksBuffer::exchange(Block && block) { diff --git a/src/Interpreters/SortedBlocksWriter.h b/src/Interpreters/SortedBlocksWriter.h index 0262a274c68..db8ed860207 100644 --- a/src/Interpreters/SortedBlocksWriter.h +++ b/src/Interpreters/SortedBlocksWriter.h @@ -8,7 +8,7 @@ #include #include #include - +#include namespace DB { @@ -24,7 +24,7 @@ using VolumePtr = std::shared_ptr; struct SortedBlocksWriter { - using TmpFilePtr = std::unique_ptr; + using TmpFilePtr = TemporaryFileOnDiskHolder; using SortedFiles = std::vector; struct Blocks diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index 9ad3dc7c4a9..4b5d4f46b49 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -416,7 +416,7 @@ void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits) LOG_TRACE(log, "Resetting nice"); if (0 != setpriority(PRIO_PROCESS, thread_id, 0)) - LOG_ERROR(log, "Cannot 'setpriority' back to zero: {}", 
errnoToString(ErrorCodes::CANNOT_SET_THREAD_PRIORITY, errno)); + LOG_ERROR(log, "Cannot 'setpriority' back to zero: {}", errnoToString()); os_thread_priority = 0; } diff --git a/src/Parsers/ASTColumnsTransformers.cpp b/src/Parsers/ASTColumnsTransformers.cpp index d90d1e747f4..71207724a89 100644 --- a/src/Parsers/ASTColumnsTransformers.cpp +++ b/src/Parsers/ASTColumnsTransformers.cpp @@ -51,7 +51,13 @@ void ASTColumnsApplyTransformer::formatImpl(const FormatSettings & settings, For settings.ostr << func_name; if (parameters) - parameters->formatImpl(settings, state, frame); + { + auto nested_frame = frame; + nested_frame.expression_list_prepend_whitespace = false; + settings.ostr << "("; + parameters->formatImpl(settings, state, nested_frame); + settings.ostr << ")"; + } } if (!column_name_prefix.empty()) diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index ac340eef987..63dc9f6b3ac 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -12,7 +12,6 @@ #include #include #include -#include #include #include diff --git a/src/Parsers/ASTQueryWithOutput.cpp b/src/Parsers/ASTQueryWithOutput.cpp index 95bcaaad416..6db011417a6 100644 --- a/src/Parsers/ASTQueryWithOutput.cpp +++ b/src/Parsers/ASTQueryWithOutput.cpp @@ -1,4 +1,5 @@ #include +#include namespace DB { @@ -40,7 +41,7 @@ void ASTQueryWithOutput::formatImpl(const FormatSettings & s, FormatState & stat format->formatImpl(s, state, frame); } - if (settings_ast) + if (settings_ast && assert_cast(settings_ast.get())->print_in_format) { s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "SETTINGS " << (s.hilite ? hilite_none : ""); settings_ast->formatImpl(s, state, frame); diff --git a/src/Parsers/ASTSelectQuery.cpp b/src/Parsers/ASTSelectQuery.cpp index 4e84e1d1e20..76849653b4e 100644 --- a/src/Parsers/ASTSelectQuery.cpp +++ b/src/Parsers/ASTSelectQuery.cpp @@ -192,7 +192,7 @@ void ASTSelectQuery::formatImpl(const FormatSettings & s, FormatState & state, F limitOffset()->formatImpl(s, state, frame); } - if (settings()) + if (settings() && assert_cast(settings().get())->print_in_format) { s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "SETTINGS " << (s.hilite ? hilite_none : ""); settings()->formatImpl(s, state, frame); diff --git a/src/Parsers/ASTSetQuery.h b/src/Parsers/ASTSetQuery.h index 4e3d9d227b6..2c79ea18359 100644 --- a/src/Parsers/ASTSetQuery.h +++ b/src/Parsers/ASTSetQuery.h @@ -14,6 +14,12 @@ class ASTSetQuery : public IAST public: bool is_standalone = true; /// If false, this AST is a part of another query, such as SELECT. + /// To support overriding certain settings in a **subquery**, we add a ASTSetQuery with Settings to all subqueries, containing + /// the list of all settings that affect them (specifically or globally to the whole query). 
+ /// We use `print_in_format` to avoid printing these nodes when they were left unchanged from the parent copy + /// See more: https://github.com/ClickHouse/ClickHouse/issues/38895 + bool print_in_format = true; + SettingsChanges changes; NameToNameMap query_parameters; diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 275f3bc75cc..08240abe8c6 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -31,6 +31,7 @@ namespace ErrorCodes namespace { + ASTPtr parseComment(IParser::Pos & pos, Expected & expected) { ParserKeyword s_comment("COMMENT"); @@ -41,8 +42,10 @@ ASTPtr parseComment(IParser::Pos & pos, Expected & expected) return comment; } + } + bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserToken open(TokenType::OpeningRoundBracket); diff --git a/src/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h index 79da3defdac..f56e0a4c3a0 100644 --- a/src/Parsers/ParserCreateQuery.h +++ b/src/Parsers/ParserCreateQuery.h @@ -53,7 +53,8 @@ bool IParserNameTypePair::parseImpl(Pos & pos, ASTPtr & node, Expect NameParser name_parser; ParserDataType type_parser; - ASTPtr name, type; + ASTPtr name; + ASTPtr type; if (name_parser.parse(pos, name, expected) && type_parser.parse(pos, type, expected)) { diff --git a/src/Parsers/ParserQueryWithOutput.cpp b/src/Parsers/ParserQueryWithOutput.cpp index 6107bd2a5eb..163e71e3201 100644 --- a/src/Parsers/ParserQueryWithOutput.cpp +++ b/src/Parsers/ParserQueryWithOutput.cpp @@ -142,7 +142,9 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec // Pass them manually, to apply in InterpreterSelectQuery::initSettings() if (query->as()) { - QueryWithOutputSettingsPushDownVisitor::Data data{query_with_output.settings_ast}; + auto settings = query_with_output.settings_ast->clone(); + assert_cast(settings.get())->print_in_format = false; + QueryWithOutputSettingsPushDownVisitor::Data data{settings}; QueryWithOutputSettingsPushDownVisitor(data).visit(query); } } diff --git a/src/Processors/Formats/ISchemaReader.cpp b/src/Processors/Formats/ISchemaReader.cpp index 4e3e7e4eeb8..c7d8b87ab77 100644 --- a/src/Processors/Formats/ISchemaReader.cpp +++ b/src/Processors/Formats/ISchemaReader.cpp @@ -89,15 +89,13 @@ void IIRowSchemaReader::setContext(ContextPtr & context) } IRowSchemaReader::IRowSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_) - : IIRowSchemaReader(in_, format_settings_) + : IIRowSchemaReader(in_, format_settings_), column_names(splitColumnNames(format_settings.column_names_for_schema_inference)) { - initColumnNames(format_settings.column_names_for_schema_inference); } IRowSchemaReader::IRowSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_, DataTypePtr default_type_) - : IIRowSchemaReader(in_, format_settings_, default_type_) + : IIRowSchemaReader(in_, format_settings_, default_type_), column_names(splitColumnNames(format_settings.column_names_for_schema_inference)) { - initColumnNames(format_settings.column_names_for_schema_inference); } IRowSchemaReader::IRowSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_, const DataTypes & default_types_) @@ -171,11 +169,12 @@ NamesAndTypesList IRowSchemaReader::readSchema() return result; } -void IRowSchemaReader::initColumnNames(const String & column_names_str) +Strings splitColumnNames(const String & column_names_str) { if (column_names_str.empty()) - return; + return {}; + Strings column_names; /// 
column_names_for_schema_inference is a string in the format 'column1,column2,column3,...' boost::split(column_names, column_names_str, boost::is_any_of(",")); for (auto & column_name : column_names) { @@ -184,6 +183,7 @@ if (!col_name_trimmed.empty()) column_name = col_name_trimmed; } + return column_names; } DataTypePtr IRowSchemaReader::getDefaultType(size_t column) const diff --git a/src/Processors/Formats/ISchemaReader.h b/src/Processors/Formats/ISchemaReader.h index 5d1f1fdc029..503632fd2f8 100644 --- a/src/Processors/Formats/ISchemaReader.h +++ b/src/Processors/Formats/ISchemaReader.h @@ -136,4 +136,6 @@ void chooseResultColumnType( void checkResultColumnTypeAndAppend( NamesAndTypesList & result, DataTypePtr & type, const String & name, const DataTypePtr & default_type, size_t rows_read); +Strings splitColumnNames(const String & column_names_str); + } diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp index 75a318ce372..9ec39223484 100644 --- a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -572,6 +573,39 @@ AvroDeserializer::SkipFn AvroDeserializer::createSkipFn(avro::NodePtr root_node) } } +void AvroDeserializer::Action::deserializeNested(MutableColumns & columns, avro::Decoder & decoder, RowReadExtension & ext) const +{ + /// We should deserialize all nested columns together, because + /// in Avro the row's Nested data arrives as a single Array(Record), and we can + /// deserialize it in one pass. + + std::vector<ColumnArray::Offsets *> arrays_offsets; + arrays_offsets.reserve(nested_column_indexes.size()); + std::vector<IColumn *> nested_columns; + nested_columns.reserve(nested_column_indexes.size()); + for (size_t index : nested_column_indexes) + { + ColumnArray & column_array = assert_cast<ColumnArray &>(*columns[index]); + arrays_offsets.push_back(&column_array.getOffsets()); + nested_columns.push_back(&column_array.getData()); + ext.read_columns[index] = true; + } + + size_t total = 0; + for (size_t n = decoder.arrayStart(); n != 0; n = decoder.arrayNext()) + { + total += n; + for (size_t i = 0; i < n; ++i) + { + for (size_t j = 0; j != nested_deserializers.size(); ++j) + nested_deserializers[j](*nested_columns[j], decoder); + } + } + + for (auto & offsets : arrays_offsets) + offsets->push_back(offsets->back() + total); +} + static inline std::string concatPath(const std::string & a, const std::string & b) { return a.empty() ? b : a + "." + b; } @@ -631,6 +665,42 @@ AvroDeserializer::Action AvroDeserializer::createAction(const Block & header, co } return AvroDeserializer::Action::unionAction(branch_actions); } + else if (node->type() == avro::AVRO_ARRAY) + { + /// If the header doesn't have a column with the current_path name and the node is Array(Record), + /// check whether we have a flattened Nested table with that name. + Names nested_names = Nested::getAllNestedColumnsForTable(header, current_path); + auto nested_avro_node = node->leafAt(0); + if (nested_names.empty() || nested_avro_node->type() != avro::AVRO_RECORD) + return AvroDeserializer::Action(createSkipFn(node)); + + /// Check that all nested columns are Arrays. 
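For readers unfamiliar with flattened `Nested` columns: one Avro array of records maps onto several ClickHouse `Array` columns whose offsets must advance in lockstep, which is why `deserializeNested` above collects all offsets vectors first. A simplified model with hypothetical types (not the IColumn interface):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Each flattened sub-column is flat data plus an offsets vector; a single
// input row appends the same element count to every column's offsets.
struct FlatArrayColumn
{
    std::vector<std::int64_t> data;
    std::vector<std::size_t> offsets;  // offsets[i] = end of row i within data
};

void appendRow(std::vector<FlatArrayColumn> & columns, std::size_t elements_in_row)
{
    for (auto & col : columns)
    {
        col.data.resize(col.data.size() + elements_in_row);  // values filled by per-column deserializers
        col.offsets.push_back((col.offsets.empty() ? 0 : col.offsets.back()) + elements_in_row);
    }
}
```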
+ std::unordered_map nested_types; + for (const auto & name : nested_names) + { + auto type = header.getByName(name).type; + if (!isArray(type)) + return AvroDeserializer::Action(createSkipFn(node)); + nested_types[Nested::splitName(name).second] = assert_cast(type.get())->getNestedType(); + } + + /// Create nested deserializer for each nested column. + std::vector nested_deserializers; + std::vector nested_indexes; + for (size_t i = 0; i != nested_avro_node->leaves(); ++i) + { + const auto & name = nested_avro_node->nameAt(i); + if (!nested_types.contains(name)) + return AvroDeserializer::Action(createSkipFn(node)); + size_t nested_column_index = header.getPositionByName(Nested::concatenateName(current_path, name)); + column_found[nested_column_index] = true; + auto nested_deserializer = createDeserializeFn(nested_avro_node->leafAt(i), nested_types[name]); + nested_deserializers.emplace_back(nested_deserializer); + nested_indexes.push_back(nested_column_index); + } + + return AvroDeserializer::Action(nested_indexes, nested_deserializers); + } else { return AvroDeserializer::Action(createSkipFn(node)); diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.h b/src/Processors/Formats/Impl/AvroRowInputFormat.h index 13afa06b089..17203925f2f 100644 --- a/src/Processors/Formats/Impl/AvroRowInputFormat.h +++ b/src/Processors/Formats/Impl/AvroRowInputFormat.h @@ -37,13 +37,15 @@ public: private: using DeserializeFn = std::function; + using DeserializeNestedFn = std::function; + using SkipFn = std::function; DeserializeFn createDeserializeFn(avro::NodePtr root_node, DataTypePtr target_type); SkipFn createSkipFn(avro::NodePtr root_node); struct Action { - enum Type {Noop, Deserialize, Skip, Record, Union}; + enum Type {Noop, Deserialize, Skip, Record, Union, Nested}; Type type; /// Deserialize int target_column_idx; @@ -52,6 +54,9 @@ private: SkipFn skip_fn; /// Record | Union std::vector actions; + /// For flattened Nested column + std::vector nested_column_indexes; + std::vector nested_deserializers; Action() : type(Noop) {} @@ -65,6 +70,11 @@ private: : type(Skip) , skip_fn(skip_fn_) {} + Action(std::vector nested_column_indexes_, std::vector nested_deserializers_) + : type(Nested) + , nested_column_indexes(nested_column_indexes_) + , nested_deserializers(nested_deserializers_) {} + static Action recordAction(std::vector field_actions) { return Action(Type::Record, field_actions); } static Action unionAction(std::vector branch_actions) { return Action(Type::Union, branch_actions); } @@ -87,6 +97,9 @@ private: for (const auto & action : actions) action.execute(columns, decoder, ext); break; + case Nested: + deserializeNested(columns, decoder, ext); + break; case Union: auto index = decoder.decodeUnionIndex(); if (index >= actions.size()) @@ -101,6 +114,8 @@ private: Action(Type type_, std::vector actions_) : type(type_) , actions(actions_) {} + + void deserializeNested(MutableColumns & columns, avro::Decoder & decoder, RowReadExtension & ext) const; }; /// Populate actions by recursively traversing root schema diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 8f5591e6aa8..55cdbb5467e 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -401,6 +401,13 @@ void registerCSVSchemaReader(FormatFactory & factory) { return std::make_shared(buf, with_names, with_types, settings); }); + factory.registerAdditionalInfoForSchemaCacheGetter(format_name, 
[with_names](const FormatSettings & settings) + { + String result = getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::CSV); + if (!with_names) + result += fmt::format(", column_names_for_schema_inference={}", settings.column_names_for_schema_inference); + return result; + }); }; registerWithNamesAndTypes("CSV", register_func); diff --git a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp index e94ac27870f..e9ccfd70094 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp @@ -319,7 +319,7 @@ void registerInputFormatCapnProto(FormatFactory & factory) factory.markFormatSupportsSubsetOfColumns("CapnProto"); factory.registerFileExtension("capnp", "CapnProto"); factory.registerAdditionalInfoForSchemaCacheGetter( - "CapnProto", [](const FormatSettings & settings) { return "Format schema: " + settings.schema.format_schema; }); + "CapnProto", [](const FormatSettings & settings) { return fmt::format("format_schema={}", settings.schema.format_schema); }); } void registerCapnProtoSchemaReader(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp index e5397ca0757..61f79f7b4e1 100644 --- a/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp @@ -353,6 +353,19 @@ void registerCustomSeparatedSchemaReader(FormatFactory & factory) { return std::make_shared(buf, with_names, with_types, ignore_spaces, settings); }); + factory.registerAdditionalInfoForSchemaCacheGetter(format_name, [](const FormatSettings & settings) + { + String result = getAdditionalFormatInfoByEscapingRule(settings, settings.custom.escaping_rule); + return result + fmt::format( + ", result_before_delimiter={}, row_before_delimiter={}, field_delimiter={}," + " row_after_delimiter={}, row_between_delimiter={}, result_after_delimiter={}", + settings.custom.result_before_delimiter, + settings.custom.row_before_delimiter, + settings.custom.field_delimiter, + settings.custom.row_after_delimiter, + settings.custom.row_between_delimiter, + settings.custom.result_after_delimiter); + }); }; registerWithNamesAndTypes(ignore_spaces ? 
"CustomSeparatedIgnoreSpaces" : "CustomSeparated", register_func); diff --git a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormat.cpp b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormat.cpp index 22264d01a57..ad80a0222df 100644 --- a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormat.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB { @@ -66,6 +67,10 @@ void registerJSONColumnsSchemaReader(FormatFactory & factory) return std::make_shared(buf, settings, std::make_unique(buf)); } ); + factory.registerAdditionalInfoForSchemaCacheGetter("JSONColumns", [](const FormatSettings & settings) + { + return getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::JSON); + }); } } diff --git a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.cpp b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.cpp index 7f9fbddd554..b6477ef58fa 100644 --- a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.cpp +++ b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -178,7 +179,10 @@ Chunk JSONColumnsBlockInputFormatBase::generate() JSONColumnsSchemaReaderBase::JSONColumnsSchemaReaderBase( ReadBuffer & in_, const FormatSettings & format_settings_, std::unique_ptr reader_) - : ISchemaReader(in_), format_settings(format_settings_), reader(std::move(reader_)) + : ISchemaReader(in_) + , format_settings(format_settings_) + , reader(std::move(reader_)) + , column_names_from_settings(splitColumnNames(format_settings_.column_names_for_schema_inference)) { } @@ -214,8 +218,15 @@ NamesAndTypesList JSONColumnsSchemaReaderBase::readSchema() do { auto column_name_opt = reader->readColumnStart(); - /// If format doesn't have named for columns, use default names 'c1', 'c2', ... - String column_name = column_name_opt.has_value() ? *column_name_opt : "c" + std::to_string(iteration + 1); + /// If format doesn't have names for columns, use names from setting column_names_for_schema_inference or default names 'c1', 'c2', ... + String column_name; + if (column_name_opt.has_value()) + column_name = *column_name_opt; + else if (iteration < column_names_from_settings.size()) + column_name = column_names_from_settings[iteration]; + else + column_name = "c" + std::to_string(iteration + 1); + /// Keep order of column names as it is in input data. 
if (!names_to_types.contains(column_name)) names_order.push_back(column_name); diff --git a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.h b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.h index 6769e60be22..308c8a59b92 100644 --- a/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.h +++ b/src/Processors/Formats/Impl/JSONColumnsBlockInputFormatBase.h @@ -87,6 +87,7 @@ private: const FormatSettings format_settings; std::unique_ptr reader; + Names column_names_from_settings; }; } diff --git a/src/Processors/Formats/Impl/JSONCompactColumnsBlockInputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactColumnsBlockInputFormat.cpp index 5b26ee2677b..d4d4a51c05d 100644 --- a/src/Processors/Formats/Impl/JSONCompactColumnsBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactColumnsBlockInputFormat.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB { @@ -60,6 +61,11 @@ void registerJSONCompactColumnsSchemaReader(FormatFactory & factory) return std::make_shared(buf, settings, std::make_unique(buf)); } ); + factory.registerAdditionalInfoForSchemaCacheGetter("JSONCompactColumns", [](const FormatSettings & settings) + { + auto result = getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::JSON); + return result + fmt::format(", column_names_for_schema_inference={}", settings.column_names_for_schema_inference); + }); } } diff --git a/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp index 8ea379beae5..8b0b4e35697 100644 --- a/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp @@ -245,6 +245,11 @@ void registerJSONCompactEachRowSchemaReader(FormatFactory & factory) { return std::make_shared(buf, with_names, with_types, json_strings, settings); }); + factory.registerAdditionalInfoForSchemaCacheGetter(format_name, [](const FormatSettings & settings) + { + auto result = getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::JSON); + return result + fmt::format(", column_names_for_schema_inference={}", settings.column_names_for_schema_inference); + }); }; registerWithNamesAndTypes(json_strings ? 
"JSONCompactStringsEachRow" : "JSONCompactEachRow", register_func); } diff --git a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp index 7cb62f9bf83..31289cadf1b 100644 --- a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp @@ -355,44 +355,26 @@ void JSONEachRowSchemaReader::transformTypesIfNeeded(DataTypePtr & type, DataTyp void registerInputFormatJSONEachRow(FormatFactory & factory) { - factory.registerInputFormat("JSONEachRow", []( - ReadBuffer & buf, - const Block & sample, - IRowInputFormat::Params params, - const FormatSettings & settings) + auto register_format = [&](const String & format_name, bool json_strings) { - return std::make_shared(buf, sample, std::move(params), settings, false); - }); + factory.registerInputFormat(format_name, [json_strings]( + ReadBuffer & buf, + const Block & sample, + IRowInputFormat::Params params, + const FormatSettings & settings) + { + return std::make_shared(buf, sample, std::move(params), settings, json_strings); + }); + }; - factory.registerInputFormat("JSONLines", []( - ReadBuffer & buf, - const Block & sample, - IRowInputFormat::Params params, - const FormatSettings & settings) - { - return std::make_shared(buf, sample, std::move(params), settings, false); - }); - - factory.registerInputFormat("NDJSON", []( - ReadBuffer & buf, - const Block & sample, - IRowInputFormat::Params params, - const FormatSettings & settings) - { - return std::make_shared(buf, sample, std::move(params), settings, false); - }); + register_format("JSONEachRow", false); + register_format("JSONLines", false); + register_format("NDJSON", false); factory.registerFileExtension("ndjson", "JSONEachRow"); factory.registerFileExtension("jsonl", "JSONEachRow"); - factory.registerInputFormat("JSONStringsEachRow", []( - ReadBuffer & buf, - const Block & sample, - IRowInputFormat::Params params, - const FormatSettings & settings) - { - return std::make_shared(buf, sample, std::move(params), settings, true); - }); + register_format("JSONStringsEachRow", true); factory.markFormatSupportsSubsetOfColumns("JSONEachRow"); factory.markFormatSupportsSubsetOfColumns("JSONLines"); @@ -418,25 +400,22 @@ void registerNonTrivialPrefixAndSuffixCheckerJSONEachRow(FormatFactory & factory void registerJSONEachRowSchemaReader(FormatFactory & factory) { - factory.registerSchemaReader("JSONEachRow", [](ReadBuffer & buf, const FormatSettings & settings) + auto register_schema_reader = [&](const String & format_name, bool json_strings) { - return std::make_unique(buf, false, settings); - }); + factory.registerSchemaReader(format_name, [json_strings](ReadBuffer & buf, const FormatSettings & settings) + { + return std::make_unique(buf, json_strings, settings); + }); + factory.registerAdditionalInfoForSchemaCacheGetter(format_name, [](const FormatSettings & settings) + { + return getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::JSON); + }); + }; - factory.registerSchemaReader("JSONStringsEachRow", [](ReadBuffer & buf, const FormatSettings & settings) - { - return std::make_unique(buf, true, settings); - }); - - factory.registerSchemaReader("JSONLines", [](ReadBuffer & buf, const FormatSettings & settings) - { - return std::make_unique(buf, false, settings); - }); - - factory.registerSchemaReader("NDJSON", [](ReadBuffer & buf, const FormatSettings & settings) - { - return std::make_unique(buf, false, settings); - }); + 
register_schema_reader("JSONEachRow", false); + register_schema_reader("JSONLines", false); + register_schema_reader("NDJSON", false); + register_schema_reader("JSONStringsEachRow", true); } } diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp index 48502e7af3a..b3d237fecfd 100644 --- a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp @@ -539,6 +539,14 @@ void registerMsgPackSchemaReader(FormatFactory & factory) { return std::make_shared(buf, settings); }); + factory.registerAdditionalInfoForSchemaCacheGetter("MsgPack", [](const FormatSettings & settings) + { + return fmt::format( + "number_of_columns={}, schema_inference_hints={}, max_rows_to_read_for_schema_inference={}", + settings.msgpack.number_of_columns, + settings.schema_inference_hints, + settings.max_rows_to_read_for_schema_inference); + }); } } diff --git a/src/Processors/Formats/Impl/MySQLDumpRowInputFormat.cpp b/src/Processors/Formats/Impl/MySQLDumpRowInputFormat.cpp index 8c24f22ce13..beca7ad2552 100644 --- a/src/Processors/Formats/Impl/MySQLDumpRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/MySQLDumpRowInputFormat.cpp @@ -452,9 +452,6 @@ void registerInputFormatMySQLDump(FormatFactory & factory) { return std::make_shared(buf, header, params, settings); }); - - factory.registerAdditionalInfoForSchemaCacheGetter( - "MySQLDump", [](const FormatSettings & settings) { return "Table name: " + settings.mysql_dump.table_name; }); } void registerMySQLSchemaReader(FormatFactory & factory) @@ -463,6 +460,12 @@ void registerMySQLSchemaReader(FormatFactory & factory) { return std::make_shared(buf, settings); }); + + factory.registerAdditionalInfoForSchemaCacheGetter("MySQLDump", [](const FormatSettings & settings) + { + auto result = getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::Quoted); + return result + fmt::format(", table_name={}", settings.mysql_dump.table_name); + }); } diff --git a/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp b/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp index a0af124edce..b7c5ef92328 100644 --- a/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp +++ b/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include @@ -306,7 +307,10 @@ void PrometheusTextOutputFormat::write(const Columns & columns, size_t row_num) } if (pos.help.has_value() && !columns[*pos.help]->isNullAt(row_num) && current_metric.help.empty()) + { current_metric.help = getString(columns, row_num, *pos.help); + std::replace(current_metric.help.begin(), current_metric.help.end(), '\n', ' '); + } if (pos.type.has_value() && !columns[*pos.type]->isNullAt(row_num) && current_metric.type.empty()) current_metric.type = getString(columns, row_num, *pos.type); diff --git a/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp b/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp index 7af0d8a7094..73e8e7992d3 100644 --- a/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp +++ b/src/Processors/Formats/Impl/ProtobufListInputFormat.cpp @@ -82,7 +82,7 @@ void registerInputFormatProtobufList(FormatFactory & factory) }); factory.markFormatSupportsSubsetOfColumns("ProtobufList"); factory.registerAdditionalInfoForSchemaCacheGetter( - "ProtobufList", [](const FormatSettings & settings) { return "Format schema: " + settings.schema.format_schema; }); + "ProtobufList", [](const 
FormatSettings & settings) { return fmt::format("format_schema={}", settings.schema.format_schema); }); } void registerProtobufListSchemaReader(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp b/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp index 5fd4bb79fa0..3046b005fa8 100644 --- a/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp @@ -104,7 +104,7 @@ void registerProtobufSchemaReader(FormatFactory & factory) for (const auto & name : {"Protobuf", "ProtobufSingle"}) factory.registerAdditionalInfoForSchemaCacheGetter( - name, [](const FormatSettings & settings) { return "Format schema: " + settings.schema.format_schema; }); + name, [](const FormatSettings & settings) { return fmt::format("format_schema={}", settings.schema.format_schema); }); } } diff --git a/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp b/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp index c6150863bd4..aaa42fee00d 100644 --- a/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp @@ -211,6 +211,11 @@ void registerRegexpSchemaReader(FormatFactory & factory) { return std::make_shared(buf, settings); }); + factory.registerAdditionalInfoForSchemaCacheGetter("Regexp", [](const FormatSettings & settings) + { + auto result = getAdditionalFormatInfoByEscapingRule(settings, settings.regexp.escaping_rule); + return result + fmt::format(", regexp={}", settings.regexp.regexp); + }); } } diff --git a/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp b/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp index 7393a1d6ce6..838aba72e3d 100644 --- a/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp @@ -285,6 +285,10 @@ void registerTSKVSchemaReader(FormatFactory & factory) { return std::make_shared(buf, settings); }); + factory.registerAdditionalInfoForSchemaCacheGetter("TSKV", [](const FormatSettings & settings) + { + return getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::Escaped); + }); } } diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index a50302697e6..d983e5371b8 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ -240,7 +240,7 @@ void TabSeparatedFormatReader::checkNullValueForNonNullable(DataTypePtr type) void TabSeparatedFormatReader::skipPrefixBeforeHeader() { - for (size_t i = 0; i != format_settings.csv.skip_first_lines; ++i) + for (size_t i = 0; i != format_settings.tsv.skip_first_lines; ++i) readRow(); } @@ -302,6 +302,14 @@ void registerTSVSchemaReader(FormatFactory & factory) { return std::make_shared(buf, with_names, with_types, is_raw, settings); }); + factory.registerAdditionalInfoForSchemaCacheGetter(format_name, [with_names, is_raw](const FormatSettings & settings) + { + String result = getAdditionalFormatInfoByEscapingRule( + settings, is_raw ? FormatSettings::EscapingRule::Raw : FormatSettings::EscapingRule::Escaped); + if (!with_names) + result += fmt::format(", column_names_for_schema_inference={}", settings.column_names_for_schema_inference); + return result; + }); }; registerWithNamesAndTypes(is_raw ? 
"TabSeparatedRaw" : "TabSeparated", register_func); diff --git a/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp b/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp index 6e8bba89d8c..785658c0fa2 100644 --- a/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp @@ -566,13 +566,32 @@ void registerTemplateSchemaReader(FormatFactory & factory) { for (bool ignore_spaces : {false, true}) { - factory.registerSchemaReader(ignore_spaces ? "TemplateIgnoreSpaces" : "Template", [ignore_spaces](ReadBuffer & buf, const FormatSettings & settings) + String format_name = ignore_spaces ? "TemplateIgnoreSpaces" : "Template"; + factory.registerSchemaReader(format_name, [ignore_spaces](ReadBuffer & buf, const FormatSettings & settings) { size_t index = 0; auto idx_getter = [&](const String &) -> std::optional { return index++; }; auto row_format = fillRowFormat(settings, idx_getter, false); return std::make_shared(buf, ignore_spaces, fillResultSetFormat(settings), row_format, settings.template_settings.row_between_delimiter, settings); }); + factory.registerAdditionalInfoForSchemaCacheGetter(format_name, [](const FormatSettings & settings) + { + size_t index = 0; + auto idx_getter = [&](const String &) -> std::optional { return index++; }; + auto row_format = fillRowFormat(settings, idx_getter, false); + std::unordered_set visited_escaping_rules; + String result = fmt::format("row_format={}, resultset_format={}, row_between_delimiter={}", + settings.template_settings.row_format, + settings.template_settings.resultset_format, + settings.template_settings.row_between_delimiter); + for (auto escaping_rule : row_format.escaping_rules) + { + if (!visited_escaping_rules.contains(escaping_rule)) + result += ", " + getAdditionalFormatInfoByEscapingRule(settings, settings.regexp.escaping_rule); + visited_escaping_rules.insert(escaping_rule); + } + return result; + }); } } diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index 49b758b78c4..f3822a7665d 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -634,6 +634,10 @@ void registerValuesSchemaReader(FormatFactory & factory) { return std::make_shared(buf, settings); }); + factory.registerAdditionalInfoForSchemaCacheGetter("Values", [](const FormatSettings & settings) + { + return getAdditionalFormatInfoByEscapingRule(settings, FormatSettings::EscapingRule::Quoted); + }); } } diff --git a/src/Processors/Merges/Algorithms/tests/gtest_graphite.cpp b/src/Processors/Merges/Algorithms/tests/gtest_graphite.cpp index ca895c5ed2a..c767e1e50ea 100644 --- a/src/Processors/Merges/Algorithms/tests/gtest_graphite.cpp +++ b/src/Processors/Merges/Algorithms/tests/gtest_graphite.cpp @@ -12,6 +12,8 @@ #include #include #include +#include + using namespace DB; @@ -43,7 +45,7 @@ static ConfigProcessor::LoadedConfig loadConfigurationFromString(std::string & s int fd = mkstemp(tmp_file); if (fd == -1) { - throw std::runtime_error(strerror(errno)); + throw std::runtime_error(errnoToString()); } try { @@ -61,7 +63,7 @@ static ConfigProcessor::LoadedConfig loadConfigurationFromString(std::string & s { int err = errno; (void)remove(tmp_file); - throw std::runtime_error(strerror(err)); + throw std::runtime_error(errnoToString(err)); } ConfigProcessor::LoadedConfig config = loadConfiguration(config_path); (void)remove(tmp_file); diff --git 
a/src/Processors/Transforms/FilterTransform.cpp b/src/Processors/Transforms/FilterTransform.cpp index 5d75bdc483c..2f5f3d69cff 100644 --- a/src/Processors/Transforms/FilterTransform.cpp +++ b/src/Processors/Transforms/FilterTransform.cpp @@ -49,7 +49,8 @@ FilterTransform::FilterTransform( ExpressionActionsPtr expression_, String filter_column_name_, bool remove_filter_column_, - bool on_totals_) + bool on_totals_, + std::shared_ptr> rows_filtered_) : ISimpleTransform( header_, transformHeader(header_, expression_ ? &expression_->getActionsDAG() : nullptr, filter_column_name_, remove_filter_column_), @@ -58,6 +59,7 @@ FilterTransform::FilterTransform( , filter_column_name(std::move(filter_column_name_)) , remove_filter_column(remove_filter_column_) , on_totals(on_totals_) + , rows_filtered(rows_filtered_) { transformed_header = getInputPort().getHeader(); if (expression) @@ -100,6 +102,14 @@ void FilterTransform::removeFilterIfNeed(Chunk & chunk) const } void FilterTransform::transform(Chunk & chunk) +{ + auto chunk_rows_before = chunk.getNumRows(); + doTransform(chunk); + if (rows_filtered) + *rows_filtered += chunk_rows_before - chunk.getNumRows(); +} + +void FilterTransform::doTransform(Chunk & chunk) { size_t num_rows_before_filtration = chunk.getNumRows(); auto columns = chunk.detachColumns(); diff --git a/src/Processors/Transforms/FilterTransform.h b/src/Processors/Transforms/FilterTransform.h index be892414c96..bb72b72d619 100644 --- a/src/Processors/Transforms/FilterTransform.h +++ b/src/Processors/Transforms/FilterTransform.h @@ -20,7 +20,7 @@ class FilterTransform : public ISimpleTransform public: FilterTransform( const Block & header_, ExpressionActionsPtr expression_, String filter_column_name_, - bool remove_filter_column_, bool on_totals_ = false); + bool remove_filter_column_, bool on_totals_ = false, std::shared_ptr> rows_filtered_ = nullptr); static Block transformHeader( Block header, @@ -43,11 +43,14 @@ private: ConstantFilterDescription constant_filter_description; size_t filter_column_position = 0; + std::shared_ptr> rows_filtered; + /// Header after expression, but before removing filter column. 
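The `rows_filtered` member above is a shared atomic rather than a plain counter because several FilterTransform instances can run on different pipeline threads, and the owner needs the combined total after the pipeline finishes. A hedged sketch of the pattern (hypothetical type, not the real transform):

```cpp
#include <atomic>
#include <cstddef>
#include <memory>

// Owner and transform share one counter; each transform adds the rows it
// dropped, and the owner reads the total when the query is done.
struct FilterCounter
{
    std::shared_ptr<std::atomic<std::size_t>> rows_filtered;

    void account(std::size_t rows_before, std::size_t rows_after)
    {
        if (rows_filtered)
            *rows_filtered += rows_before - rows_after;
    }
};
```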
Block transformed_header; bool are_prepared_sets_initialized = false; + void doTransform(Chunk & chunk); void removeFilterIfNeed(Chunk & chunk) const; }; diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index 7c0422584c9..422659788a7 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -15,8 +16,16 @@ namespace ProfileEvents { extern const Event ExternalSortWritePart; extern const Event ExternalSortMerge; + extern const Event ExternalSortCompressedBytes; + extern const Event ExternalSortUncompressedBytes; + extern const Event ExternalProcessingCompressedBytesTotal; + extern const Event ExternalProcessingUncompressedBytesTotal; } +namespace CurrentMetrics +{ + extern const Metric TemporaryFilesForSort; +} namespace DB { @@ -50,9 +59,14 @@ public: { if (out_stream) { + out_stream->flush(); compressed_buf_out.next(); file_buf_out.next(); - LOG_INFO(log, "Done writing part of data into temporary file {}", path); + + auto stat = updateWriteStat(); + + LOG_INFO(log, "Done writing part of data into temporary file {}, compressed {}, uncompressed {} ", + path, ReadableSize(static_cast(stat.compressed_size)), ReadableSize(static_cast(stat.uncompressed_size))); out_stream.reset(); @@ -76,6 +90,24 @@ public: } private: + struct Stat + { + size_t compressed_size = 0; + size_t uncompressed_size = 0; + }; + + Stat updateWriteStat() + { + Stat res{compressed_buf_out.getCompressedBytes(), compressed_buf_out.getUncompressedBytes()}; + + ProfileEvents::increment(ProfileEvents::ExternalProcessingCompressedBytesTotal, res.compressed_size); + ProfileEvents::increment(ProfileEvents::ExternalProcessingUncompressedBytesTotal, res.uncompressed_size); + + ProfileEvents::increment(ProfileEvents::ExternalSortCompressedBytes, res.compressed_size); + ProfileEvents::increment(ProfileEvents::ExternalSortUncompressedBytes, res.uncompressed_size); + return res; + } + Poco::Logger * log; std::string path; WriteBufferFromFile file_buf_out; @@ -182,8 +214,7 @@ void MergeSortingTransform::consume(Chunk chunk) if (!reservation) throw Exception("Not enough space for external sort in temporary storage", ErrorCodes::NOT_ENOUGH_SPACE); - const std::string tmp_path(reservation->getDisk()->getPath()); - temporary_files.emplace_back(createTemporaryFile(tmp_path)); + temporary_files.emplace_back(std::make_unique(reservation->getDisk(), CurrentMetrics::TemporaryFilesForSort)); const std::string & path = temporary_files.back()->path(); merge_sorter @@ -236,7 +267,7 @@ void MergeSortingTransform::generate() else { ProfileEvents::increment(ProfileEvents::ExternalSortMerge); - LOG_INFO(log, "There are {} temporary sorted parts to merge.", temporary_files.size()); + LOG_INFO(log, "There are {} temporary sorted parts to merge", temporary_files.size()); processors.emplace_back(std::make_shared( header_without_constants, std::move(chunks), description, max_merged_block_size, limit)); diff --git a/src/Processors/Transforms/MergeSortingTransform.h b/src/Processors/Transforms/MergeSortingTransform.h index e118a2a655b..465193548e7 100644 --- a/src/Processors/Transforms/MergeSortingTransform.h +++ b/src/Processors/Transforms/MergeSortingTransform.h @@ -3,6 +3,7 @@ #include #include #include +#include #include @@ -55,7 +56,7 @@ private: bool remerge_is_useful = true; /// Everything below is for external sorting. 
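The stat updates above follow one pattern across external sort, aggregation, and join: each spilled part's compressed and uncompressed sizes feed both an operation-specific counter and the server-wide `ExternalProcessing*Total` counters. A sketch of that double bookkeeping with illustrative types (not the ProfileEvents API):

```cpp
#include <cstddef>
#include <cstdio>

struct SpillStat { std::size_t compressed = 0; std::size_t uncompressed = 0; };

// One spilled part updates the per-operation totals (e.g. ExternalSort*)
// and the global totals (ExternalProcessing*Total) in the same place.
void recordSpill(const SpillStat & part, SpillStat & op_totals, SpillStat & global_totals)
{
    op_totals.compressed += part.compressed;
    op_totals.uncompressed += part.uncompressed;
    global_totals.compressed += part.compressed;
    global_totals.uncompressed += part.uncompressed;
    std::printf("spilled part: %zu bytes compressed, %zu uncompressed\n",
                part.compressed, part.uncompressed);
}
```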
- std::vector> temporary_files; + std::vector temporary_files; /// Merge all accumulated blocks to keep no more than limit rows. void remerge(); diff --git a/src/Server/CertificateReloader.cpp b/src/Server/CertificateReloader.cpp index c187b53a926..ba23414e9cf 100644 --- a/src/Server/CertificateReloader.cpp +++ b/src/Server/CertificateReloader.cpp @@ -23,12 +23,6 @@ int callSetCertificate(SSL * ssl, [[maybe_unused]] void * arg) } -namespace ErrorCodes -{ - extern const int CANNOT_STAT; -} - - /// This is callback for OpenSSL. It will be called on every connection to obtain a certificate and private key. int CertificateReloader::setCertificate(SSL * ssl) { @@ -118,7 +112,7 @@ bool CertificateReloader::File::changeIfModified(std::string new_path, Poco::Log if (ec) { LOG_ERROR(logger, "Cannot obtain modification time for {} file {}, skipping update. {}", - description, new_path, errnoToString(ErrorCodes::CANNOT_STAT, ec.value())); + description, new_path, errnoToString(ec.value())); return false; } diff --git a/src/Server/HTTP/HTTPServerRequest.cpp b/src/Server/HTTP/HTTPServerRequest.cpp index bb72c2a4010..3b8df07b772 100644 --- a/src/Server/HTTP/HTTPServerRequest.cpp +++ b/src/Server/HTTP/HTTPServerRequest.cpp @@ -46,7 +46,7 @@ HTTPServerRequest::HTTPServerRequest(ContextPtr context, HTTPServerResponse & re readRequest(*in); /// Try parse according to RFC7230 if (getChunkedTransferEncoding()) - stream = std::make_unique(std::move(in)); + stream = std::make_unique(std::move(in), context->getSettingsRef().http_max_chunk_size); else if (hasContentLength()) stream = std::make_unique(std::move(in), getContentLength(), false); else if (getMethod() != HTTPRequest::HTTP_GET && getMethod() != HTTPRequest::HTTP_HEAD && getMethod() != HTTPRequest::HTTP_DELETE) diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index 526b86a5c28..6659c9b8390 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -169,10 +169,20 @@ void addCommonDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IS replicas_status_handler->allowGetAndHeadRequest(); factory.addHandler(replicas_status_handler); - auto web_ui_handler = std::make_shared>(server, "play.html"); - web_ui_handler->attachNonStrictPath("/play"); - web_ui_handler->allowGetAndHeadRequest(); - factory.addHandler(web_ui_handler); + auto play_handler = std::make_shared>(server); + play_handler->attachNonStrictPath("/play"); + play_handler->allowGetAndHeadRequest(); + factory.addHandler(play_handler); + + auto dashboard_handler = std::make_shared>(server); + dashboard_handler->attachNonStrictPath("/dashboard"); + dashboard_handler->allowGetAndHeadRequest(); + factory.addHandler(dashboard_handler); + + auto js_handler = std::make_shared>(server); + js_handler->attachNonStrictPath("/js/"); + js_handler->allowGetAndHeadRequest(); + factory.addHandler(js_handler); } void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics) diff --git a/src/Server/PrometheusMetricsWriter.cpp b/src/Server/PrometheusMetricsWriter.cpp index 30ae6f6fe42..9168eb5f24d 100644 --- a/src/Server/PrometheusMetricsWriter.cpp +++ b/src/Server/PrometheusMetricsWriter.cpp @@ -1,10 +1,10 @@ #include "PrometheusMetricsWriter.h" -#include - #include #include -#include +#include /// TODO: this library is harmful. 
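The handler registrations above split the old single web UI handler into three routes: `/play`, `/dashboard`, and `/js/` for the bundled uPlot script. A compact sketch of the equivalent prefix dispatch (hypothetical enum; the real code attaches separate `WebUIRequestHandler` instances per prefix):

```cpp
#include <string>

enum class Route { Play, Dashboard, UplotJs, NotFound };

// C++20 starts_with, matching the URI checks in WebUIRequestHandler below.
Route route(const std::string & uri)
{
    if (uri.starts_with("/play"))
        return Route::Play;
    if (uri.starts_with("/dashboard"))
        return Route::Dashboard;
    if (uri.starts_with("/js/"))
        return Route::UplotJs;
    return Route::NotFound;
}
```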
+#include + namespace { @@ -33,6 +33,11 @@ bool replaceInvalidChars(std::string & metric_name) return !metric_name.empty(); } +void convertHelpToSingleLine(std::string & help) +{ + std::replace(help.begin(), help.end(), '\n', ' '); +} + } @@ -61,6 +66,8 @@ void PrometheusMetricsWriter::write(WriteBuffer & wb) const std::string metric_name{ProfileEvents::getName(static_cast(i))}; std::string metric_doc{ProfileEvents::getDocumentation(static_cast(i))}; + convertHelpToSingleLine(metric_doc); + if (!replaceInvalidChars(metric_name)) continue; std::string key{profile_events_prefix + metric_name}; @@ -80,6 +87,8 @@ void PrometheusMetricsWriter::write(WriteBuffer & wb) const std::string metric_name{CurrentMetrics::getName(static_cast(i))}; std::string metric_doc{CurrentMetrics::getDocumentation(static_cast(i))}; + convertHelpToSingleLine(metric_doc); + if (!replaceInvalidChars(metric_name)) continue; std::string key{current_metrics_prefix + metric_name}; @@ -115,6 +124,8 @@ void PrometheusMetricsWriter::write(WriteBuffer & wb) const std::string metric_name{CurrentStatusInfo::getName(static_cast(i))}; std::string metric_doc{CurrentStatusInfo::getDocumentation(static_cast(i))}; + convertHelpToSingleLine(metric_doc); + if (!replaceInvalidChars(metric_name)) continue; std::string key{current_status_prefix + metric_name}; diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index b268179ae93..1fc88168b35 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -428,8 +428,6 @@ void TCPHandler::runImpl() if (e.code() == ErrorCodes::UNKNOWN_PACKET_FROM_CLIENT) throw; - LOG_TEST(log, "Going to close connection due to exception: {}", e.message()); - /// If there is UNEXPECTED_PACKET_FROM_CLIENT emulate network_error /// to break the loop, but do not throw to send the exception to /// the client. @@ -439,6 +437,9 @@ void TCPHandler::runImpl() /// If a timeout occurred, try to inform client about it and close the session if (e.code() == ErrorCodes::SOCKET_TIMEOUT) network_error = true; + + if (network_error) + LOG_TEST(log, "Going to close connection due to exception: {}", e.message()); } catch (const Poco::Net::NetException & e) { diff --git a/src/Server/WebUIRequestHandler.cpp b/src/Server/WebUIRequestHandler.cpp index 50aa0be4778..3997e0f19b6 100644 --- a/src/Server/WebUIRequestHandler.cpp +++ b/src/Server/WebUIRequestHandler.cpp @@ -8,12 +8,14 @@ #include #include +#include + namespace DB { -WebUIRequestHandler::WebUIRequestHandler(IServer & server_, std::string resource_name_) - : server(server_), resource_name(std::move(resource_name_)) +WebUIRequestHandler::WebUIRequestHandler(IServer & server_) + : server(server_) { } @@ -28,8 +30,38 @@ void WebUIRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerR response.setChunkedTransferEncoding(true); setResponseDefaultHeaders(response, keep_alive_timeout); - response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_OK); - *response.send() << getResource(resource_name); + + if (request.getURI().starts_with("/play")) + { + response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_OK); + *response.send() << getResource("play.html"); + } + else if (request.getURI().starts_with("/dashboard")) + { + response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_OK); + + std::string html(getResource("dashboard.html")); + + /// Replace the link to the external JavaScript file with the embedded file. + /// This allows opening the HTML without running a server, and also hosting it on a server.
+ /// Note: we can embed the JavaScript file inline into the HTML, + /// but we don't do it, to keep the "view-source" perfectly readable. + + static re2::RE2 uplot_url = R"(https://[^\s"'`]+u[Pp]lot[^\s"'`]*\.js)"; + RE2::Replace(&html, uplot_url, "/js/uplot.js"); + + *response.send() << html; + } + else if (request.getURI() == "/js/uplot.js") + { + response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_OK); + *response.send() << getResource("js/uplot.js"); + } + else + { + response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_NOT_FOUND); + *response.send() << "Not found.\n"; + } } } diff --git a/src/Server/WebUIRequestHandler.h b/src/Server/WebUIRequestHandler.h index 1c52b626091..09fe62d41c3 100644 --- a/src/Server/WebUIRequestHandler.h +++ b/src/Server/WebUIRequestHandler.h @@ -13,11 +13,10 @@ class WebUIRequestHandler : public HTTPRequestHandler { private: IServer & server; - std::string resource_name; + public: - WebUIRequestHandler(IServer & server_, std::string resource_name_); + WebUIRequestHandler(IServer & server_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; }; } - diff --git a/src/Storages/Cache/SchemaCache.cpp b/src/Storages/Cache/SchemaCache.cpp index 7bce1965f1a..22b6921f6c1 100644 --- a/src/Storages/Cache/SchemaCache.cpp +++ b/src/Storages/Cache/SchemaCache.cpp @@ -17,21 +17,21 @@ SchemaCache::SchemaCache(size_t max_elements_) : max_elements(max_elements_) { } -void SchemaCache::add(const String & key, const ColumnsDescription & columns) +void SchemaCache::add(const Key & key, const ColumnsDescription & columns) { std::lock_guard lock(mutex); addUnlocked(key, columns); } -void SchemaCache::addMany(const Strings & keys, const ColumnsDescription & columns) +void SchemaCache::addMany(const Keys & keys, const ColumnsDescription & columns) { std::lock_guard lock(mutex); for (const auto & key : keys) addUnlocked(key, columns); } -void SchemaCache::addUnlocked(const String & key, const ColumnsDescription & columns) +void SchemaCache::addUnlocked(const Key & key, const ColumnsDescription & columns) { /// Do nothing if this key is already in cache; if (data.contains(key)) @@ -54,7 +54,7 @@ void SchemaCache::checkOverflow() ProfileEvents::increment(ProfileEvents::SchemaInferenceCacheEvictions); } -std::optional SchemaCache::tryGet(const String & key, LastModificationTimeGetter get_last_mod_time) +std::optional SchemaCache::tryGet(const Key & key, LastModificationTimeGetter get_last_mod_time) { std::lock_guard lock(mutex); auto it = data.find(key); @@ -101,10 +101,10 @@ void SchemaCache::clear() queue.clear(); } -std::unordered_map SchemaCache::getAll() +std::unordered_map SchemaCache::getAll() { std::lock_guard lock(mutex); - std::unordered_map result; + std::unordered_map result; for (const auto & [key, value] : data) result[key] = value.schema_info; diff --git a/src/Storages/Cache/SchemaCache.h b/src/Storages/Cache/SchemaCache.h index 132fbc0a8cb..deec32df49e 100644 --- a/src/Storages/Cache/SchemaCache.h +++ b/src/Storages/Cache/SchemaCache.h @@ -23,6 +23,28 @@ class SchemaCache public: SchemaCache(size_t max_elements_); + struct Key + { + String source; + String format; + String additional_format_info; + + bool operator==(const Key & other) const + { + return source == other.source && format == other.format && additional_format_info == other.additional_format_info; + } + }; + + using Keys = std::vector; + + struct KeyHash + { + size_t operator()(const Key & key) const + { + return std::hash()(key.source + key.format +
key.additional_format_info); + } + }; + struct SchemaInfo { ColumnsDescription columns; @@ -32,22 +54,22 @@ public: using LastModificationTimeGetter = std::function()>; /// Add new key with a schema - void add(const String & key, const ColumnsDescription & columns); + void add(const Key & key, const ColumnsDescription & columns); /// Add many keys with the same schema (usually used for globs) - void addMany(const Strings & keys, const ColumnsDescription & columns); + void addMany(const Keys & keys, const ColumnsDescription & columns); - std::optional tryGet(const String & key, LastModificationTimeGetter get_last_mod_time = {}); + std::optional tryGet(const Key & key, LastModificationTimeGetter get_last_mod_time = {}); void clear(); - std::unordered_map getAll(); + std::unordered_map getAll(); private: - void addUnlocked(const String & key, const ColumnsDescription & columns); + void addUnlocked(const Key & key, const ColumnsDescription & columns); void checkOverflow(); - using Queue = std::list; + using Queue = std::list; using QueueIterator = Queue::iterator; struct Cell @@ -57,7 +79,7 @@ private: }; Queue queue; - std::unordered_map data; + std::unordered_map data; size_t max_elements; std::mutex mutex; diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index 13c085d650b..8ea2954368e 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -766,7 +766,7 @@ void DistributedSink::writeToShard(const Block & block, const std::vector #include -#include #include #include + #if USE_KRB5 -#include -#endif // USE_KRB5 + #include +#endif + namespace DB { + namespace ErrorCodes { extern const int BAD_ARGUMENTS; @@ -26,10 +28,15 @@ namespace ErrorCodes #endif // USE_KRB5 } -const String HDFSBuilderWrapper::CONFIG_PREFIX = "hdfs"; -const String HDFS_URL_REGEXP = "^hdfs://[^/]*/.*"; +static constexpr std::string_view CONFIG_PREFIX = "hdfs"; +static constexpr std::string_view HDFS_URL_REGEXP = "^hdfs://[^/]*/.*"; + + +HDFSFileInfo::~HDFSFileInfo() +{ + hdfsFreeFileInfo(file_info, length); +} -std::once_flag init_libhdfs3_conf_flag; void HDFSBuilderWrapper::loadFromConfig(const Poco::Util::AbstractConfiguration & config, const String & prefix, bool isUser) @@ -111,23 +118,6 @@ HDFSBuilderWrapper createHDFSBuilder(const String & uri_str, const Poco::Util::A if (host.empty()) throw Exception("Illegal HDFS URI: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); - // Shall set env LIBHDFS3_CONF *before* HDFSBuilderWrapper construction. 
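// A hedged sketch of the typed cache key that replaces plain String keys in SchemaCache above:
// source, format and extra format info identify an inferred schema, and the hash simply
// concatenates the fields. (Aside: plain concatenation can collide, e.g. "ab"+"c" vs "a"+"bc";
// a delimiter or a hash combiner would avoid that.) This is a standalone stand-in, not the
// ClickHouse class itself.
#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct Key
{
    std::string source;
    std::string format;
    std::string additional_format_info;

    bool operator==(const Key & other) const
    {
        return source == other.source && format == other.format
            && additional_format_info == other.additional_format_info;
    }
};

struct KeyHash
{
    std::size_t operator()(const Key & key) const
    {
        return std::hash<std::string>()(key.source + key.format + key.additional_format_info);
    }
};

int main()
{
    std::unordered_map<Key, std::string, KeyHash> schemas;
    schemas[{"hdfs://namenode/data.csv", "CSV", ""}] = "a UInt64, b String";
    std::cout << schemas.size() << '\n';
}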
- std::call_once(init_libhdfs3_conf_flag, [&config]() - { - String libhdfs3_conf = config.getString(HDFSBuilderWrapper::CONFIG_PREFIX + ".libhdfs3_conf", ""); - if (!libhdfs3_conf.empty()) - { - if (std::filesystem::path{libhdfs3_conf}.is_relative() && !std::filesystem::exists(libhdfs3_conf)) - { - const String config_path = config.getString("config-file", "config.xml"); - const auto config_dir = std::filesystem::path{config_path}.remove_filename(); - if (std::filesystem::exists(config_dir / libhdfs3_conf)) - libhdfs3_conf = std::filesystem::absolute(config_dir / libhdfs3_conf); - } - setenv("LIBHDFS3_CONF", libhdfs3_conf.c_str(), 1); - } - }); - HDFSBuilderWrapper builder; if (builder.get() == nullptr) throw Exception("Unable to create builder to connect to HDFS: " + @@ -157,14 +147,14 @@ HDFSBuilderWrapper createHDFSBuilder(const String & uri_str, const Poco::Util::A hdfsBuilderSetNameNodePort(builder.get(), port); } - if (config.has(HDFSBuilderWrapper::CONFIG_PREFIX)) + if (config.has(std::string(CONFIG_PREFIX))) { - builder.loadFromConfig(config, HDFSBuilderWrapper::CONFIG_PREFIX); + builder.loadFromConfig(config, std::string(CONFIG_PREFIX)); } if (!user.empty()) { - String user_config_prefix = HDFSBuilderWrapper::CONFIG_PREFIX + "_" + user; + String user_config_prefix = std::string(CONFIG_PREFIX) + "_" + user; if (config.has(user_config_prefix)) { builder.loadFromConfig(config, user_config_prefix, true); @@ -208,7 +198,7 @@ String getNameNodeCluster(const String &hdfs_url) void checkHDFSURL(const String & url) { - if (!re2::RE2::FullMatch(url, HDFS_URL_REGEXP)) + if (!re2::RE2::FullMatch(url, std::string(HDFS_URL_REGEXP))) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs url: {}. It should have structure 'hdfs://:/'", url); } diff --git a/src/Storages/HDFS/HDFSCommon.h b/src/Storages/HDFS/HDFSCommon.h index 9eb2dfd3e46..d33a0ac97b6 100644 --- a/src/Storages/HDFS/HDFSCommon.h +++ b/src/Storages/HDFS/HDFSCommon.h @@ -40,11 +40,7 @@ struct HDFSFileInfo HDFSFileInfo(HDFSFileInfo && other) = default; HDFSFileInfo & operator=(const HDFSFileInfo & other) = delete; HDFSFileInfo & operator=(HDFSFileInfo && other) = default; - - ~HDFSFileInfo() - { - hdfsFreeFileInfo(file_info, length); - } + ~HDFSFileInfo(); }; diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index c18726aa7c4..f93bc45d1a3 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -774,7 +774,7 @@ std::optional StorageHDFS::tryGetColumnsFromCache( }; String url = fs::path(uri_without_path) / path; - String cache_key = getKeyForSchemaCache(url, format_name, {}, ctx); + auto cache_key = getKeyForSchemaCache(url, format_name, {}, ctx); auto columns = schema_cache.tryGet(cache_key, get_last_mod_time); if (columns) return columns; @@ -794,7 +794,7 @@ void StorageHDFS::addColumnsToCache( Strings sources; sources.reserve(paths.size()); std::transform(paths.begin(), paths.end(), std::back_inserter(sources), [&](const String & path){ return fs::path(uri_without_path) / path; }); - Strings cache_keys = getKeysForSchemaCache(sources, format_name, {}, ctx); + auto cache_keys = getKeysForSchemaCache(sources, format_name, {}, ctx); schema_cache.addMany(cache_keys, columns); } diff --git a/src/Storages/HDFS/StorageHDFSCluster.cpp b/src/Storages/HDFS/StorageHDFSCluster.cpp index 200c8cb3320..47a6fbf5eaa 100644 --- a/src/Storages/HDFS/StorageHDFSCluster.cpp +++ b/src/Storages/HDFS/StorageHDFSCluster.cpp @@ -41,7 +41,7 @@ StorageHDFSCluster::StorageHDFSCluster( const 
ColumnsDescription & columns_, const ConstraintsDescription & constraints_, const String & compression_method_) - : IStorageCluster(table_id_) + : IStorage(table_id_) , cluster_name(cluster_name_) , uri(uri_) , format_name(format_name_) @@ -74,7 +74,13 @@ Pipe StorageHDFSCluster::read( size_t /*max_block_size*/, unsigned /*num_streams*/) { - createIteratorAndCallback(context); + auto cluster = context->getCluster(cluster_name)->getClusterWithReplicasAsShards(context->getSettingsRef()); + + auto iterator = std::make_shared(context, uri); + auto callback = std::make_shared([iterator]() mutable -> String + { + return iterator->next(); + }); /// Calculate the header. This is significant, because some columns could be thrown away in some cases like query with count(*) Block header = @@ -134,29 +140,6 @@ QueryProcessingStage::Enum StorageHDFSCluster::getQueryProcessingStage( } -void StorageHDFSCluster::createIteratorAndCallback(ContextPtr context) const -{ - cluster = context->getCluster(cluster_name)->getClusterWithReplicasAsShards(context->getSettingsRef()); - - iterator = std::make_shared(context, uri); - callback = std::make_shared([iter = this->iterator]() mutable -> String { return iter->next(); }); -} - - -RemoteQueryExecutor::Extension StorageHDFSCluster::getTaskIteratorExtension(ContextPtr context) const -{ - createIteratorAndCallback(context); - return RemoteQueryExecutor::Extension{.task_iterator = callback}; -} - - -ClusterPtr StorageHDFSCluster::getCluster(ContextPtr context) const -{ - createIteratorAndCallback(context); - return cluster; -} - - NamesAndTypesList StorageHDFSCluster::getVirtuals() const { return NamesAndTypesList{ diff --git a/src/Storages/HDFS/StorageHDFSCluster.h b/src/Storages/HDFS/StorageHDFSCluster.h index 64b5fa86e05..21ae73c11ea 100644 --- a/src/Storages/HDFS/StorageHDFSCluster.h +++ b/src/Storages/HDFS/StorageHDFSCluster.h @@ -9,7 +9,6 @@ #include #include -#include #include namespace DB @@ -17,7 +16,7 @@ namespace DB class Context; -class StorageHDFSCluster : public IStorageCluster +class StorageHDFSCluster : public IStorage { public: StorageHDFSCluster( @@ -40,20 +39,11 @@ public: NamesAndTypesList getVirtuals() const override; - ClusterPtr getCluster(ContextPtr context) const override; - RemoteQueryExecutor::Extension getTaskIteratorExtension(ContextPtr context) const override; - private: String cluster_name; String uri; String format_name; String compression_method; - - mutable ClusterPtr cluster; - mutable std::shared_ptr iterator; - mutable std::shared_ptr callback; - - void createIteratorAndCallback(ContextPtr context) const; }; diff --git a/src/Storages/IStorageCluster.h b/src/Storages/IStorageCluster.h deleted file mode 100644 index ecab7266153..00000000000 --- a/src/Storages/IStorageCluster.h +++ /dev/null @@ -1,28 +0,0 @@ -#pragma once - -#include -#include -#include - -namespace DB -{ - - -/** - * Base cluster for Storages used in table functions like s3Cluster and hdfsCluster - * Needed for code simplification around parallel_distributed_insert_select - */ -class IStorageCluster: public IStorage -{ -public: - - explicit IStorageCluster(const StorageID & table_id_) : IStorage(table_id_) {} - - virtual ClusterPtr getCluster(ContextPtr context) const = 0; - virtual RemoteQueryExecutor::Extension getTaskIteratorExtension(ContextPtr context) const = 0; - - bool isRemote() const override { return true; } -}; - - -} diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 058cc5ff7de..b51457ba5d5 100644 --- 
a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -40,8 +40,6 @@ #include #include #include -#include - #include #include @@ -847,7 +845,7 @@ void registerStorageKafka(StorageFactory & factory) auto num_consumers = kafka_settings->kafka_num_consumers.value; auto max_consumers = std::max(getNumberOfPhysicalCPUCores(), 16); - if (num_consumers > max_consumers) + if (!args.getLocalContext()->getSettingsRef().kafka_disable_num_consumers_limit && num_consumers > max_consumers) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "The number of consumers can not be bigger than {}. " "A single consumer can read any number of partitions. Extra consumers are relatively expensive, " diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp index 1cd6fd0305b..0154fd6e281 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp @@ -415,7 +415,7 @@ std::string DataPartStorageOnDisk::getDiskName() const std::string DataPartStorageOnDisk::getDiskType() const { - return toString(volume->getDisk()->getType()); + return toString(volume->getDisk()->getDataSourceDescription().type); } bool DataPartStorageOnDisk::isStoredOnRemoteDisk() const diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 5fe3ee4da28..02821de8629 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -458,11 +458,11 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( Disks disks = data.getDisks(); for (const auto & data_disk : disks) if (data_disk->supportZeroCopyReplication()) - capability.push_back(toString(data_disk->getType())); + capability.push_back(toString(data_disk->getDataSourceDescription().type)); } else if (disk->supportZeroCopyReplication()) { - capability.push_back(toString(disk->getType())); + capability.push_back(toString(disk->getDataSourceDescription().type)); } } if (!capability.empty()) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 1c1871226ba..32c2c09a392 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1231,9 +1231,9 @@ void IMergeTreeDataPart::assertHasVersionMetadata(MergeTreeTransaction * txn) co assert(!txn || data_part_storage->exists(TXN_VERSION_METADATA_FILE_NAME)); } -void IMergeTreeDataPart::storeVersionMetadata() const +void IMergeTreeDataPart::storeVersionMetadata(bool force) const { - if (!wasInvolvedInTransaction()) + if (!wasInvolvedInTransaction() && !force) return; LOG_TEST(storage.log, "Writing version for {} (creation: {}, removal {})", name, version.creation_tid, version.removal_tid); @@ -1285,8 +1285,6 @@ void IMergeTreeDataPart::loadVersionMetadata() const try { data_part_storage->loadVersionMetadata(version, storage.log); - - } catch (Exception & e) { diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index bdf42223d2f..1e23886fb21 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -411,7 +411,7 @@ public: void assertHasVersionMetadata(MergeTreeTransaction * txn) const; /// [Re]writes file with transactional metadata on disk - void storeVersionMetadata() const; + void storeVersionMetadata(bool force = false) const; /// Appends the corresponding CSN to file on disk (without fsync) void 
appendCSNToVersionMetadata(VersionMetadata::WhichCSN which_csn) const; diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp index 31c6a635b18..9be49a9bba4 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp @@ -79,10 +79,18 @@ NameSet IMergedBlockOutputStream::removeEmptyColumnsFromPart( } /// Remove files on disk and checksums - for (const String & removed_file : remove_files) + for (auto itr = remove_files.begin(); itr != remove_files.end();) { - if (checksums.files.contains(removed_file)) - checksums.files.erase(removed_file); + if (checksums.files.contains(*itr)) + { + checksums.files.erase(*itr); + ++itr; + } + else /// If the file is not in checksums, it doesn't exist on disk + { + LOG_TRACE(storage.log, "File {} doesn't exist in checksums, so it doesn't exist on disk; will not try to remove it", *itr); + itr = remove_files.erase(itr); + } } /// Remove columns from columns array diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 81840f0cffb..c247d2d2476 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -427,6 +427,7 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const return false; size_t sum_input_rows_exact = global_ctx->merge_list_element_ptr->rows_read; + size_t input_rows_filtered = *global_ctx->input_rows_filtered; global_ctx->merge_list_element_ptr->columns_written = global_ctx->merging_column_names.size(); global_ctx->merge_list_element_ptr->progress.store(ctx->column_sizes->keyColumnsWeight(), std::memory_order_relaxed); @@ -439,10 +440,11 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const /// In special case, when there is only one source part, and no rows were skipped, we may have /// skipped writing rows_sources file. Otherwise rows_sources_count must be equal to the total /// number of input rows. - if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count) - throw Exception("Number of rows in source parts (" + toString(sum_input_rows_exact) + ") differs from number of bytes written to rows_sources file (" + toString(rows_sources_count) + "). It is a bug.", ErrorCodes::LOGICAL_ERROR); + if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count + input_rows_filtered) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Number of rows in source parts ({}) excluding filtered rows ({}) differs from number of bytes written to rows_sources file ({}).
It is a bug.", + sum_input_rows_exact, input_rows_filtered, rows_sources_count); ctx->rows_sources_read_buf = std::make_unique(ctx->tmp_disk->readFile(fileName(ctx->rows_sources_file->path()))); @@ -454,7 +456,6 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const return false; } - void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const { const auto & [column_name, column_type] = *ctx->it_name_and_type; @@ -467,10 +468,17 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const Pipes pipes; for (size_t part_num = 0; part_num < global_ctx->future_part->parts.size(); ++part_num) { - auto column_part_source = std::make_shared( - *global_ctx->data, global_ctx->storage_snapshot, global_ctx->future_part->parts[part_num], column_names, ctx->read_with_direct_io, true); + Pipe pipe = createMergeTreeSequentialSource( + *global_ctx->data, + global_ctx->storage_snapshot, + global_ctx->future_part->parts[part_num], + column_names, + ctx->read_with_direct_io, + true, + false, + global_ctx->input_rows_filtered); - pipes.emplace_back(std::move(column_part_source)); + pipes.emplace_back(std::move(pipe)); } auto pipe = Pipe::unitePipes(std::move(pipes)); @@ -809,27 +817,15 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() for (const auto & part : global_ctx->future_part->parts) { - auto columns = global_ctx->merging_column_names; - - /// The part might have some rows masked by lightweight deletes - const auto lightweight_delete_filter_column = LightweightDeleteDescription::FILTER_COLUMN.name; - const bool need_to_filter_deleted_rows = part->hasLightweightDelete(); - if (need_to_filter_deleted_rows) - columns.emplace_back(lightweight_delete_filter_column); - - auto input = std::make_unique( - *global_ctx->data, global_ctx->storage_snapshot, part, columns, ctx->read_with_direct_io, true); - - Pipe pipe(std::move(input)); - - /// Add filtering step that discards deleted rows - if (need_to_filter_deleted_rows) - { - pipe.addSimpleTransform([lightweight_delete_filter_column](const Block & header) - { - return std::make_shared(header, nullptr, lightweight_delete_filter_column, true); - }); - } + Pipe pipe = createMergeTreeSequentialSource( + *global_ctx->data, + global_ctx->storage_snapshot, + part, + global_ctx->merging_column_names, + ctx->read_with_direct_io, + true, + false, + global_ctx->input_rows_filtered); if (global_ctx->metadata_snapshot->hasSortingKey()) { diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 2c338e03973..43aba602052 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -170,6 +170,8 @@ private: MergeTreeData::MutableDataPartPtr new_data_part{nullptr}; DataPartStorageBuilderPtr data_part_storage_builder; + /// If lightweight delete mask is present then some input rows are filtered out right after reading. + std::shared_ptr> input_rows_filtered{std::make_shared>(0)}; size_t rows_written{0}; UInt64 watch_prev_elapsed{0}; diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index 08142bd8dd1..e2cd797ab92 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -703,11 +703,11 @@ size_t MergeTreeBaseSelectProcessor::estimateMaxBatchSizeForHugeRanges() { /// This is an empirical number and it is so, /// because we have an adaptive granularity by default. 
- const size_t average_granule_size_bytes = 8UL * 1024 * 1024 * 10; // 10 MiB + const size_t average_granule_size_bytes = 1024 * 1024 * 10; // 10 MiB /// We want to have one RTT per one gigabyte of data read from disk /// this could be configurable. - const size_t max_size_for_one_request = 8UL * 1024 * 1024 * 1024; // 1 GiB + const size_t max_size_for_one_request = 1024 * 1024 * 1024; // 1 GiB size_t sum_average_marks_size = 0; /// getColumnSize is not fully implemented for compact parts diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 7314eb84a2c..ee2cbf4fe8a 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -52,7 +51,6 @@ #include #include #include -#include #include #include #include @@ -64,6 +62,7 @@ #include #include #include +#include #include #include #include @@ -1457,7 +1456,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) throw Exception(ErrorCodes::LOGICAL_ERROR, "Part {} has invalid version metadata: {}", part->name, version.toString()); if (version_updated) - part->storeVersionMetadata(); + part->storeVersionMetadata(/* force */ true); /// Deactivate part if creation was not committed or if removal was. if (version.creation_csn == Tx::RolledBackCSN || version.removal_csn) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 11fd861882b..a649b2a59c6 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -1,6 +1,5 @@ #include "MergeTreeDataMergerMutator.h" -#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeMarksLoader.cpp b/src/Storages/MergeTree/MergeTreeMarksLoader.cpp index b5cf3431bea..ce26a86f0c0 100644 --- a/src/Storages/MergeTree/MergeTreeMarksLoader.cpp +++ b/src/Storages/MergeTree/MergeTreeMarksLoader.cpp @@ -42,8 +42,7 @@ const MarkInCompressedFile & MergeTreeMarksLoader::getMark(size_t row_index, siz #ifndef NDEBUG if (column_index >= columns_in_mark) - throw Exception("Column index: " + toString(column_index) - + " is out of range [0, " + toString(columns_in_mark) + ")", ErrorCodes::LOGICAL_ERROR); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Column index: {} is out of range [0, {})", column_index, columns_in_mark); #endif return (*marks)[row_index * columns_in_mark + column_index]; @@ -74,8 +73,9 @@ MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksImpl() buffer->readStrict(reinterpret_cast(res->data()), file_size); if (!buffer->eof()) - throw Exception("Cannot read all marks from file " + mrk_path + ", eof: " + std::to_string(buffer->eof()) - + ", buffer size: " + std::to_string(buffer->buffer().size()) + ", file size: " + std::to_string(file_size), ErrorCodes::CANNOT_READ_ALL_DATA); + throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, + "Cannot read all marks from file {}, is eof: {}, buffer size: {}, file size: {}", + mrk_path, buffer->eof(), buffer->buffer().size(), file_size); } else { @@ -89,7 +89,7 @@ MarkCache::MappedPtr MergeTreeMarksLoader::loadMarksImpl() } if (i * mark_size != file_size) - throw Exception("Cannot read all marks from file " + mrk_path, ErrorCodes::CANNOT_READ_ALL_DATA); + throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Cannot read all marks from file {}", mrk_path); } res->protect(); return res; @@ -116,7 +116,7 @@ void MergeTreeMarksLoader::loadMarks() 
marks = loadMarksImpl(); if (!marks) - throw Exception("Failed to load marks: " + std::string(fs::path(data_part_storage->getFullPath()) / mrk_path), ErrorCodes::LOGICAL_ERROR); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to load marks: {}", String(fs::path(data_part_storage->getFullPath()) / mrk_path)); } } diff --git a/src/Storages/MergeTree/MergeTreeMarksLoader.h b/src/Storages/MergeTree/MergeTreeMarksLoader.h index b176571b6df..3a1d3dc2c1b 100644 --- a/src/Storages/MergeTree/MergeTreeMarksLoader.h +++ b/src/Storages/MergeTree/MergeTreeMarksLoader.h @@ -25,8 +25,6 @@ public: const MarkInCompressedFile & getMark(size_t row_index, size_t column_index = 0); - bool initialized() const { return marks != nullptr; } - private: DataPartStoragePtr data_part_storage; MarkCache * mark_cache = nullptr; diff --git a/src/Storages/MergeTree/MergeTreeReaderStream.h b/src/Storages/MergeTree/MergeTreeReaderStream.h index dc2d99144ce..f5a8ebadcba 100644 --- a/src/Storages/MergeTree/MergeTreeReaderStream.h +++ b/src/Storages/MergeTree/MergeTreeReaderStream.h @@ -76,4 +76,5 @@ private: MergeTreeMarksLoader marks_loader; }; + } diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index c86acf11547..5b9eceece51 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -1,5 +1,7 @@ #include #include +#include +#include #include namespace DB @@ -9,6 +11,65 @@ namespace ErrorCodes extern const int MEMORY_LIMIT_EXCEEDED; } + +/// Lightweight (in terms of logic) stream for reading a single part from MergeTree +/// NOTE: +/// It doesn't filter out rows that are deleted with lightweight deletes. +/// Use createMergeTreeSequentialSource to filter out those rows.
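// A hedged, self-contained sketch of the pattern used here: the source class becomes private to
// the .cpp file, and callers obtain a pipe from a free factory function that appends the
// deleted-rows filter (counting what it drops) only when the part actually carries a
// lightweight-delete mask. Chunk/Pipe below are toy stand-ins for the processor classes.
#include <atomic>
#include <cstddef>
#include <functional>
#include <memory>
#include <vector>

struct Chunk { std::vector<int> rows; };

struct Pipe
{
    std::function<Chunk()> source;

    void addSimpleTransform(std::function<Chunk(Chunk)> transform)
    {
        auto prev = std::move(source);
        source = [prev = std::move(prev), transform = std::move(transform)] { return transform(prev()); };
    }
};

Pipe createSequentialSource(bool has_lightweight_delete,
                            std::shared_ptr<std::atomic<std::size_t>> filtered_rows_count)
{
    Pipe pipe{[] { return Chunk{{1, -1, 2, -1, 3}}; }};  /// -1 marks a deleted row in this toy model

    if (has_lightweight_delete)
    {
        pipe.addSimpleTransform([filtered_rows_count](Chunk chunk)
        {
            Chunk filtered;
            for (int row : chunk.rows)
            {
                if (row < 0)
                    ++*filtered_rows_count;  /// feeds the merge row-accounting check above
                else
                    filtered.rows.push_back(row);
            }
            return filtered;
        });
    }

    return pipe;
}

int main()
{
    auto filtered = std::make_shared<std::atomic<std::size_t>>(0);
    Chunk chunk = createSequentialSource(true, filtered).source();
    return chunk.rows.size() == 3 && *filtered == 2 ? 0 : 1;
}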
+class MergeTreeSequentialSource : public ISource +{ +public: + MergeTreeSequentialSource( + const MergeTreeData & storage_, + const StorageSnapshotPtr & storage_snapshot_, + MergeTreeData::DataPartPtr data_part_, + Names columns_to_read_, + bool read_with_direct_io_, + bool take_column_types_from_storage, + bool quiet = false); + + ~MergeTreeSequentialSource() override; + + String getName() const override { return "MergeTreeSequentialSource"; } + + size_t getCurrentMark() const { return current_mark; } + + size_t getCurrentRow() const { return current_row; } + +protected: + Chunk generate() override; + +private: + + const MergeTreeData & storage; + StorageSnapshotPtr storage_snapshot; + + /// Data part will not be removed if the pointer owns it + MergeTreeData::DataPartPtr data_part; + + /// Columns we have to read (each Block from read will contain them) + Names columns_to_read; + + /// Should read using direct IO + bool read_with_direct_io; + + Poco::Logger * log = &Poco::Logger::get("MergeTreeSequentialSource"); + + std::shared_ptr mark_cache; + using MergeTreeReaderPtr = std::unique_ptr; + MergeTreeReaderPtr reader; + + /// current mark at which we stop reading + size_t current_mark = 0; + + /// current row at which we stop reading + size_t current_row = 0; + + /// Closes readers and unlock part locks + void finish(); +}; + + MergeTreeSequentialSource::MergeTreeSequentialSource( const MergeTreeData & storage_, const StorageSnapshotPtr & storage_snapshot_, @@ -145,4 +206,39 @@ void MergeTreeSequentialSource::finish() MergeTreeSequentialSource::~MergeTreeSequentialSource() = default; + +Pipe createMergeTreeSequentialSource( + const MergeTreeData & storage, + const StorageSnapshotPtr & storage_snapshot, + MergeTreeData::DataPartPtr data_part, + Names columns_to_read, + bool read_with_direct_io, + bool take_column_types_from_storage, + bool quiet, + std::shared_ptr> filtered_rows_count) +{ + /// The part might have some rows masked by lightweight deletes + const bool need_to_filter_deleted_rows = data_part->hasLightweightDelete(); + auto columns = columns_to_read; + if (need_to_filter_deleted_rows) + columns.emplace_back(LightweightDeleteDescription::FILTER_COLUMN.name); + + auto column_part_source = std::make_shared( + storage, storage_snapshot, data_part, columns, read_with_direct_io, take_column_types_from_storage, quiet); + + Pipe pipe(std::move(column_part_source)); + + /// Add filtering step that discards deleted rows + if (need_to_filter_deleted_rows) + { + pipe.addSimpleTransform([filtered_rows_count](const Block & header) + { + return std::make_shared( + header, nullptr, LightweightDeleteDescription::FILTER_COLUMN.name, true, false, filtered_rows_count); + }); + } + + return pipe; +} + } diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.h b/src/Storages/MergeTree/MergeTreeSequentialSource.h index a3e4f5fa856..c6c29f9d49a 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.h +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.h @@ -8,58 +8,16 @@ namespace DB { -/// Lightweight (in terms of logic) stream for reading single part from MergeTree -class MergeTreeSequentialSource : public ISource -{ -public: - MergeTreeSequentialSource( - const MergeTreeData & storage_, - const StorageSnapshotPtr & storage_snapshot_, - MergeTreeData::DataPartPtr data_part_, - Names columns_to_read_, - bool read_with_direct_io_, - bool take_column_types_from_storage, - bool quiet = false); - - ~MergeTreeSequentialSource() override; - - String getName() const override { return 
"MergeTreeSequentialSource"; } - - size_t getCurrentMark() const { return current_mark; } - - size_t getCurrentRow() const { return current_row; } - -protected: - Chunk generate() override; - -private: - - const MergeTreeData & storage; - StorageSnapshotPtr storage_snapshot; - - /// Data part will not be removed if the pointer owns it - MergeTreeData::DataPartPtr data_part; - - /// Columns we have to read (each Block from read will contain them) - Names columns_to_read; - - /// Should read using direct IO - bool read_with_direct_io; - - Poco::Logger * log = &Poco::Logger::get("MergeTreeSequentialSource"); - - std::shared_ptr mark_cache; - using MergeTreeReaderPtr = std::unique_ptr; - MergeTreeReaderPtr reader; - - /// current mark at which we stop reading - size_t current_mark = 0; - - /// current row at which we stop reading - size_t current_row = 0; - - /// Closes readers and unlock part locks - void finish(); -}; +/// Create stream for reading single part from MergeTree. +/// If the part has lightweight delete mask then the deleted rows are filtered out. +Pipe createMergeTreeSequentialSource( + const MergeTreeData & storage, + const StorageSnapshotPtr & storage_snapshot, + MergeTreeData::DataPartPtr data_part, + Names columns_to_read, + bool read_with_direct_io, + bool take_column_types_from_storage, + bool quiet, + std::shared_ptr> filtered_rows_count); } diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 89081fe924f..07659b1c9dc 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -95,6 +95,7 @@ struct Settings; M(Seconds, replicated_fetches_http_receive_timeout, 0, "HTTP receive timeout for fetch part requests. Inherited from default profile `http_receive_timeout` if not set explicitly.", 0) \ M(Bool, replicated_can_become_leader, true, "If true, Replicated tables replicas on this node will try to acquire leadership.", 0) \ M(Seconds, zookeeper_session_expiration_check_period, 60, "ZooKeeper session expiration check period, in seconds.", 0) \ + M(Seconds, initialization_retry_period, 60, "Retry period for table initialization, in seconds.", 0) \ M(Bool, detach_old_local_parts_when_cloning_replica, true, "Do not remove old local parts when repairing lost replica.", 0) \ M(Bool, detach_not_byte_identical_parts, false, "Do not remove non byte-idential parts for ReplicatedMergeTree, instead detach them (maybe useful for further analysis).", 0) \ M(UInt64, max_replicated_fetches_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. 
Zero means unlimited.", 0) \ diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 06c5aac8ae3..a5bc189e42f 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -94,7 +94,7 @@ void MergedBlockOutputStream::Finalizer::Impl::finish() { writer.finish(sync); - for (const auto & file_name: files_to_remove_after_finish) + for (const auto & file_name : files_to_remove_after_finish) data_part_storage_builder->removeFile(file_name); for (auto & file : written_files) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp new file mode 100644 index 00000000000..f3e33b6b38b --- /dev/null +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp @@ -0,0 +1,172 @@ +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int SUPPORT_IS_DISABLED; +} + +ReplicatedMergeTreeAttachThread::ReplicatedMergeTreeAttachThread(StorageReplicatedMergeTree & storage_) + : storage(storage_) + , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeAttachThread)") + , log(&Poco::Logger::get(log_name)) +{ + task = storage.getContext()->getSchedulePool().createTask(log_name, [this] { run(); }); + const auto storage_settings = storage.getSettings(); + retry_period = storage_settings->initialization_retry_period.totalSeconds(); +} + +ReplicatedMergeTreeAttachThread::~ReplicatedMergeTreeAttachThread() +{ + shutdown(); +} + +void ReplicatedMergeTreeAttachThread::shutdown() +{ + if (!shutdown_called.exchange(true)) + { + task->deactivate(); + LOG_INFO(log, "Attach thread finished"); + } +} + +void ReplicatedMergeTreeAttachThread::run() +{ + bool needs_retry{false}; + try + { + // we delay the first reconnect if the storage failed to connect to ZK initially + if (!first_try_done && !storage.current_zookeeper) + { + needs_retry = true; + } + else + { + runImpl(); + finalizeInitialization(); + } + } + catch (const Exception & e) + { + if (const auto * coordination_exception = dynamic_cast(&e)) + needs_retry = Coordination::isHardwareError(coordination_exception->code); + + if (needs_retry) + { + LOG_ERROR(log, "Initialization failed. Error: {}", e.message()); + } + else + { + LOG_ERROR(log, "Initialization failed, table will remain readonly. Error: {}", e.message()); + storage.initialization_done = true; + } + } + + if (!first_try_done.exchange(true)) + first_try_done.notify_one(); + + if (shutdown_called) + { + LOG_WARNING(log, "Shutdown called, cancelling initialization"); + return; + } + + if (needs_retry) + { + LOG_INFO(log, "Will retry initialization in {}s", retry_period); + task->scheduleAfter(retry_period * 1000); + } +} + +void ReplicatedMergeTreeAttachThread::checkHasReplicaMetadataInZooKeeper(const zkutil::ZooKeeperPtr & zookeeper, const String & replica_path) +{ + /// Since 20.4 and until 22.9 "/metadata" and "/metadata_version" nodes were created on replica startup. + /// Since 21.12 we could use "/metadata" to check if replica is dropped (see StorageReplicatedMergeTree::dropReplica), + /// but it did not work correctly, because "/metadata" node was re-created on server startup. + /// Since 22.9 we do not recreate these nodes and use "/host" to check if replica is dropped. 
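// A hedged sketch of the retry policy implemented by run() above: retriable ZooKeeper
// ("hardware") errors reschedule initialization after the configurable retry period, while any
// other error gives up and leaves the table readonly. The plain loop below stands in for
// BackgroundSchedulePool; names are illustrative.
#include <chrono>
#include <exception>
#include <functional>
#include <iostream>
#include <thread>

struct RetriableError : std::exception {};  /// e.g. a ZooKeeper connection loss

void runWithRetries(const std::function<void()> & init, int retry_period_seconds)
{
    while (true)
    {
        try
        {
            init();
            std::cout << "Table is initialized\n";
            return;
        }
        catch (const RetriableError &)
        {
            std::cout << "Initialization failed, will retry in " << retry_period_seconds << "s\n";
            std::this_thread::sleep_for(std::chrono::seconds(retry_period_seconds));
        }
        catch (const std::exception &)
        {
            std::cout << "Initialization failed, table will remain readonly\n";
            return;
        }
    }
}

int main()
{
    int attempts = 0;
    runWithRetries([&] { if (++attempts < 3) throw RetriableError{}; }, 1);
}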
+ + String replica_metadata; + const bool replica_metadata_exists = zookeeper->tryGet(replica_path + "/metadata", replica_metadata); + if (!replica_metadata_exists || replica_metadata.empty() || !zookeeper->exists(replica_path + "/metadata_version")) + { + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Upgrade from 20.3 and older to 22.9 and newer " + "should be done through an intermediate version (failed to get metadata or metadata_version for {}, " + "assuming it's because of upgrading)", replica_path); + } +} + +void ReplicatedMergeTreeAttachThread::runImpl() +{ + storage.setZooKeeper(); + + auto zookeeper = storage.getZooKeeper(); + const auto & zookeeper_path = storage.zookeeper_path; + bool metadata_exists = zookeeper->exists(zookeeper_path + "/metadata"); + if (!metadata_exists) + { + LOG_WARNING(log, "No metadata in ZooKeeper for {}: table will stay in readonly mode.", zookeeper_path); + storage.has_metadata_in_zookeeper = false; + return; + } + + auto metadata_snapshot = storage.getInMemoryMetadataPtr(); + + const auto & replica_path = storage.replica_path; + /// It may be that ZooKeeper lost not the whole root (so the check above passed), + /// but only the /replicas/<replica> folder. + bool replica_path_exists = zookeeper->exists(replica_path); + if (!replica_path_exists) + { + LOG_WARNING(log, "No metadata in ZooKeeper for {}: table will stay in readonly mode", replica_path); + storage.has_metadata_in_zookeeper = false; + return; + } + + bool host_node_exists = zookeeper->exists(replica_path + "/host"); + if (!host_node_exists) + { + LOG_WARNING(log, "Replica {} is dropped (but metadata is not completely removed from ZooKeeper), " + "table will stay in readonly mode", replica_path); + storage.has_metadata_in_zookeeper = false; + return; + } + + storage.has_metadata_in_zookeeper = true; + + checkHasReplicaMetadataInZooKeeper(zookeeper, replica_path); + + storage.checkTableStructure(replica_path, metadata_snapshot); + storage.checkParts(skip_sanity_checks); + + storage.metadata_version = parse(zookeeper->get(replica_path + "/metadata_version")); + + /// Temporary directories contain uninitialized results of Merges or Fetches (after forced restart); + /// don't allow reinitializing them, delete each of them immediately. + storage.clearOldTemporaryDirectories(0, {"tmp_", "delete_tmp_", "tmp-fetch_"}); + storage.clearOldWriteAheadLogs(); + if (storage.getSettings()->merge_tree_enable_clear_old_broken_detached) + storage.clearOldBrokenPartsFromDetachedDirecory(); + + storage.createNewZooKeeperNodes(); + storage.syncPinnedPartUUIDs(); + + storage.createTableSharedID(); +} + +void ReplicatedMergeTreeAttachThread::finalizeInitialization() TSA_NO_THREAD_SAFETY_ANALYSIS +{ + storage.startupImpl(); + storage.initialization_done = true; + LOG_INFO(log, "Table is initialized"); +} + +void ReplicatedMergeTreeAttachThread::setSkipSanityChecks(bool skip_sanity_checks_) +{ + skip_sanity_checks = skip_sanity_checks_; +} + +} diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h new file mode 100644 index 00000000000..a491a06d6a5 --- /dev/null +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include +#include +#include + +namespace DB +{ + +class StorageReplicatedMergeTree; + +// Attach the table to the existing data. +// Initialize the table by creating all the necessary nodes and doing the required checks.
+// Initialization is repeated if an operation fails because of a ZK request or connection loss. +class ReplicatedMergeTreeAttachThread +{ +public: + explicit ReplicatedMergeTreeAttachThread(StorageReplicatedMergeTree & storage_); + + ~ReplicatedMergeTreeAttachThread(); + + void start() { task->activateAndSchedule(); } + + void shutdown(); + + void waitFirstTry() { first_try_done.wait(false); } + + void setSkipSanityChecks(bool skip_sanity_checks_); + + static void checkHasReplicaMetadataInZooKeeper(const zkutil::ZooKeeperPtr & zookeeper, const String & replica_path); + +private: + StorageReplicatedMergeTree & storage; + BackgroundSchedulePool::TaskHolder task; + + std::string log_name; + Poco::Logger * log; + + std::atomic first_try_done{false}; + + std::atomic shutdown_called{false}; + + UInt64 retry_period; + + bool skip_sanity_checks{false}; + + void run(); + void runImpl(); + + void finalizeInitialization(); +}; + +} diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 11f668bafbe..9d95189b611 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -103,7 +103,6 @@ void ReplicatedMergeTreeRestartingThread::run() } bool ReplicatedMergeTreeRestartingThread::runImpl() - { if (!storage.is_readonly && !storage.getZooKeeper()->expired()) return true; diff --git a/src/Storages/NATS/NATSConnection.cpp b/src/Storages/NATS/NATSConnection.cpp index 64beb9f2dff..d33138419e2 100644 --- a/src/Storages/NATS/NATSConnection.cpp +++ b/src/Storages/NATS/NATSConnection.cpp @@ -18,7 +18,7 @@ NATSConnectionManager::NATSConnectionManager(const NATSConfiguration & configura , log(log_) , event_handler(loop.getLoop(), log) { - const char * val = std::getenv("CLICKHOUSE_NATS_TLS_SECURE"); + const char * val = std::getenv("CLICKHOUSE_NATS_TLS_SECURE"); // NOLINT(concurrency-mt-unsafe) // this is safe on Linux glibc/Musl, but potentially not safe on other platforms std::string tls_secure = val == nullptr ? 
std::string("1") : std::string(val); if (tls_secure == "0") skip_verification = true; diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp index a57328fb402..f0c5807f89c 100644 --- a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp @@ -443,6 +443,7 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl pos += unused_flags_len + commit_lsn_len + transaction_end_lsn_len + transaction_commit_timestamp_len; final_lsn = current_lsn; + committed = true; break; } case 'R': // Relation @@ -593,6 +594,12 @@ void MaterializedPostgreSQLConsumer::syncTables() LOG_DEBUG(log, "Table sync end for {} tables, last lsn: {} = {}, (attempted lsn {})", tables_to_sync.size(), current_lsn, getLSNValue(current_lsn), getLSNValue(final_lsn)); + updateLsn(); +} + + +void MaterializedPostgreSQLConsumer::updateLsn() +{ try { auto tx = std::make_shared(connection->getRef()); @@ -614,6 +621,7 @@ String MaterializedPostgreSQLConsumer::advanceLSN(std::shared_ptr(); LOG_TRACE(log, "Advanced LSN up to: {}", getLSNValue(final_lsn)); + committed = false; return final_lsn; } @@ -771,7 +779,7 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot() try { - // LOG_DEBUG(log, "Current message: {}", (*row)[1]); + /// LOG_DEBUG(log, "Current message: {}", (*row)[1]); processReplicationMessage((*row)[1].c_str(), (*row)[1].size()); } catch (const Exception & e) @@ -790,6 +798,7 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot() } catch (const pqxx::broken_connection &) { + LOG_DEBUG(log, "Connection was broken"); connection->tryUpdateConnection(); return false; } @@ -823,7 +832,13 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot() } if (!tables_to_sync.empty()) + { syncTables(); + } + else if (committed) + { + updateLsn(); + } return true; } diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h index 869d764f358..91bf5eeccde 100644 --- a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h @@ -94,6 +94,8 @@ private: void syncTables(); + void updateLsn(); + String advanceLSN(std::shared_ptr ntx); void processReplicationMessage(const char * replication_message, size_t size); @@ -136,6 +138,8 @@ private: ContextPtr context; const std::string replication_slot_name, publication_name; + bool committed = false; + std::shared_ptr connection; std::string current_lsn, final_lsn; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index e0e2acc3436..b81e029acff 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -321,13 +321,13 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) nested_storages, (is_materialized_postgresql_database ? postgres_database : postgres_database + '.' + tables_list)); + replication_handler_initialized = true; + consumer_task->activateAndSchedule(); cleanup_task->activateAndSchedule(); /// Do not rely anymore on saved storage pointers. 
materialized_storages.clear(); - - replication_handler_initialized = true; } diff --git a/src/Storages/RocksDB/EmbeddedRocksDBSink.cpp b/src/Storages/RocksDB/EmbeddedRocksDBSink.cpp index b42f2214d88..c39e70745fd 100644 --- a/src/Storages/RocksDB/EmbeddedRocksDBSink.cpp +++ b/src/Storages/RocksDB/EmbeddedRocksDBSink.cpp @@ -2,7 +2,7 @@ #include #include -#include +#include namespace DB diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp index 24a2db33dcf..c9ddd9147b9 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -21,9 +22,9 @@ #include #include -#include #include #include +#include #include #include @@ -164,10 +165,12 @@ StorageEmbeddedRocksDB::StorageEmbeddedRocksDB(const StorageID & table_id_, const StorageInMemoryMetadata & metadata_, bool attach, ContextPtr context_, - const String & primary_key_) + const String & primary_key_, + Int32 ttl_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) , primary_key{primary_key_} + , ttl(ttl_) { setInMemoryMetadata(metadata_); rocksdb_dir = context_->getPath() + relative_data_path_; @@ -193,7 +196,6 @@ void StorageEmbeddedRocksDB::initDB() { rocksdb::Status status; rocksdb::Options base; - rocksdb::DB * db; base.create_if_missing = true; base.compression = rocksdb::CompressionType::kZSTD; @@ -264,15 +266,28 @@ void StorageEmbeddedRocksDB::initDB() } } - status = rocksdb::DB::Open(merged, rocksdb_dir, &db); - - if (!status.ok()) + if (ttl > 0) { - throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to open rocksdb path at: {}: {}", - rocksdb_dir, status.ToString()); + rocksdb::DBWithTTL * db; + status = rocksdb::DBWithTTL::Open(merged, rocksdb_dir, &db, ttl); + if (!status.ok()) + { + throw Exception(ErrorCodes::ROCKSDB_ERROR, "Failed to open rocksdb path at: {}: {}", + rocksdb_dir, status.ToString()); + } + rocksdb_ptr = std::unique_ptr(db); + } + else + { + rocksdb::DB * db; + status = rocksdb::DB::Open(merged, rocksdb_dir, &db); + if (!status.ok()) + { + throw Exception(ErrorCodes::ROCKSDB_ERROR, "Failed to open rocksdb path at: {}: {}", + rocksdb_dir, status.ToString()); + } + rocksdb_ptr = std::unique_ptr(db); } - /// It's ok just to wrap db with unique_ptr, from rdb documentation: "when you are done with a database, just delete the database object" - rocksdb_ptr = std::unique_ptr(db); } Pipe StorageEmbeddedRocksDB::read( @@ -335,10 +350,16 @@ SinkToStoragePtr StorageEmbeddedRocksDB::write( static StoragePtr create(const StorageFactory::Arguments & args) { // TODO custom RocksDBSettings, table function - if (!args.engine_args.empty()) - throw Exception( - "Engine " + args.engine_name + " doesn't support any arguments (" + toString(args.engine_args.size()) + " given)", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + auto engine_args = args.engine_args; + if (engine_args.size() > 1) + { + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Engine {} requires at most 1 parameter. ({} given). 
Correct usage: EmbeddedRocksDB([ttl])", + args.engine_name, engine_args.size()); + } + + Int32 ttl{0}; + if (!engine_args.empty()) + ttl = checkAndGetLiteralArgument(engine_args[0], "ttl"); StorageInMemoryMetadata metadata; metadata.setColumns(args.columns); @@ -353,7 +374,7 @@ static StoragePtr create(const StorageFactory::Arguments & args) { throw Exception("StorageEmbeddedRocksDB must require one column in primary key", ErrorCodes::BAD_ARGUMENTS); } - return std::make_shared(args.table_id, args.relative_data_path, metadata, args.attach, args.getContext(), primary_key_names[0]); + return std::make_shared(args.table_id, args.relative_data_path, metadata, args.attach, args.getContext(), primary_key_names[0], ttl); } std::shared_ptr StorageEmbeddedRocksDB::getRocksDBStatistics() const @@ -449,6 +470,7 @@ void registerStorageEmbeddedRocksDB(StorageFactory & factory) { StorageFactory::StorageFeatures features{ .supports_sort_order = true, + .supports_ttl = true, .supports_parallel_insert = true, }; diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.h b/src/Storages/RocksDB/StorageEmbeddedRocksDB.h index 55770516b3f..ab87eac3e66 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.h +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.h @@ -32,7 +32,8 @@ public: const StorageInMemoryMetadata & metadata, bool attach, ContextPtr context_, - const String & primary_key_); + const String & primary_key_, + Int32 ttl_ = 0); std::string getName() const override { return "EmbeddedRocksDB"; } @@ -80,6 +81,7 @@ private: RocksDBPtr rocksdb_ptr; mutable std::shared_mutex rocksdb_ptr_mx; String rocksdb_dir; + Int32 ttl; void initDB(); }; diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index c14584a382b..3fc00a79bbe 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -102,6 +102,19 @@ std::unique_lock StorageBuffer::Buffer::lockImpl(bool read) const } +StoragePtr StorageBuffer::getDestinationTable() const +{ + if (!destination_id) + return {}; + + auto destination = DatabaseCatalog::instance().tryGetTable(destination_id, getContext()); + if (destination.get() == this) + throw Exception("Destination table is myself. Will lead to infinite loop.", ErrorCodes::INFINITE_LOOP); + + return destination; +} + + StorageBuffer::StorageBuffer( const StorageID & table_id_, const ColumnsDescription & columns_, @@ -134,6 +147,7 @@ StorageBuffer::StorageBuffer( } else storage_metadata.setColumns(columns_); + storage_metadata.setConstraints(constraints_); storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); @@ -193,13 +207,8 @@ QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage( const StorageSnapshotPtr &, SelectQueryInfo & query_info) const { - if (destination_id) + if (auto destination = getDestinationTable()) { - auto destination = DatabaseCatalog::instance().getTable(destination_id, local_context); - - if (destination.get() == this) - throw Exception("Destination table is myself. 
Read will cause infinite loop.", ErrorCodes::INFINITE_LOOP); - /// TODO: Find a way to support projections for StorageBuffer query_info.ignore_projections = true; const auto & destination_metadata = destination->getInMemoryMetadataPtr(); @@ -221,13 +230,8 @@ void StorageBuffer::read( { const auto & metadata_snapshot = storage_snapshot->metadata; - if (destination_id) + if (auto destination = getDestinationTable()) { - auto destination = DatabaseCatalog::instance().getTable(destination_id, local_context); - - if (destination.get() == this) - throw Exception("Destination table is myself. Read will cause infinite loop.", ErrorCodes::INFINITE_LOOP); - auto destination_lock = destination->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout); auto destination_metadata_snapshot = destination->getInMemoryMetadataPtr(); @@ -521,8 +525,8 @@ public: auto block = getHeader().cloneWithColumns(chunk.getColumns()); - StoragePtr destination; - if (storage.destination_id) + StoragePtr destination = storage.getDestinationTable(); + if (destination) { destination = DatabaseCatalog::instance().tryGetTable(storage.destination_id, storage.getContext()); if (destination.get() == &storage) @@ -537,7 +541,7 @@ public: /// If the block already exceeds the maximum limit, then we skip the buffer. if (rows > storage.max_thresholds.rows || bytes > storage.max_thresholds.bytes) { - if (storage.destination_id) + if (destination) { LOG_DEBUG(storage.log, "Writing block with {} rows, {} bytes directly.", rows, bytes); storage.writeBlockToDestination(block, destination); @@ -628,15 +632,9 @@ SinkToStoragePtr StorageBuffer::write(const ASTPtr & /*query*/, const StorageMet bool StorageBuffer::mayBenefitFromIndexForIn( const ASTPtr & left_in_operand, ContextPtr query_context, const StorageMetadataPtr & /*metadata_snapshot*/) const { - if (!destination_id) - return false; - - auto destination = DatabaseCatalog::instance().getTable(destination_id, query_context); - - if (destination.get() == this) - throw Exception("Destination table is myself. Read will cause infinite loop.", ErrorCodes::INFINITE_LOOP); - - return destination->mayBenefitFromIndexForIn(left_in_operand, query_context, destination->getInMemoryMetadataPtr()); + if (auto destination = getDestinationTable()) + return destination->mayBenefitFromIndexForIn(left_in_operand, query_context, destination->getInMemoryMetadataPtr()); + return false; } @@ -703,11 +701,8 @@ bool StorageBuffer::optimize( bool StorageBuffer::supportsPrewhere() const { - if (!destination_id) - return false; - auto dest = DatabaseCatalog::instance().tryGetTable(destination_id, getContext()); - if (dest && dest.get() != this) - return dest->supportsPrewhere(); + if (auto destination = getDestinationTable()) + return destination->supportsPrewhere(); return false; } @@ -834,7 +829,7 @@ bool StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc Stopwatch watch; try { - writeBlockToDestination(block_to_write, DatabaseCatalog::instance().tryGetTable(destination_id, getContext())); + writeBlockToDestination(block_to_write, getDestinationTable()); } catch (...) 
{ @@ -1010,14 +1005,10 @@ void StorageBuffer::checkAlterIsPossible(const AlterCommands & commands, Context std::optional StorageBuffer::totalRows(const Settings & settings) const { std::optional underlying_rows; - auto underlying = DatabaseCatalog::instance().tryGetTable(destination_id, getContext()); + if (auto destination = getDestinationTable()) + underlying_rows = destination->totalRows(settings); - if (underlying) - underlying_rows = underlying->totalRows(settings); - if (!underlying_rows) - return underlying_rows; - - return total_writes.rows + *underlying_rows; + return total_writes.rows + underlying_rows.value_or(0); } std::optional StorageBuffer::totalBytes(const Settings & /*settings*/) const diff --git a/src/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h index 200b3fc1838..580742c0c84 100644 --- a/src/Storages/StorageBuffer.h +++ b/src/Storages/StorageBuffer.h @@ -169,6 +169,8 @@ private: void backgroundFlush(); void reschedule(); + StoragePtr getDestinationTable() const; + BackgroundSchedulePool & bg_pool; BackgroundSchedulePoolTaskHolder flush_handle; }; diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index cc55b2ae271..b3ea2cb9f5b 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -59,8 +59,6 @@ #include #include -#include - #include #include #include @@ -761,35 +759,55 @@ SinkToStoragePtr StorageDistributed::write(const ASTPtr &, const StorageMetadata } -std::optional StorageDistributed::distributedWriteBetweenDistributedTables(const StorageDistributed & src_distributed, const ASTInsertQuery & query, ContextPtr local_context) const +std::optional StorageDistributed::distributedWrite(const ASTInsertQuery & query, ContextPtr local_context) { - const auto & settings = local_context->getSettingsRef(); + QueryPipeline pipeline; + + const Settings & settings = local_context->getSettingsRef(); + if (settings.max_distributed_depth && local_context->getClientInfo().distributed_depth >= settings.max_distributed_depth) + throw Exception("Maximum distributed depth exceeded", ErrorCodes::TOO_LARGE_DISTRIBUTED_DEPTH); + + std::shared_ptr storage_src; + auto & select = query.select->as(); auto new_query = std::dynamic_pointer_cast(query.clone()); - - /// Unwrap view() function. - if (src_distributed.remote_table_function_ptr) + if (select.list_of_selects->children.size() == 1) { - const TableFunctionPtr src_table_function = - TableFunctionFactory::instance().get(src_distributed.remote_table_function_ptr, local_context); - const TableFunctionView * view_function = - assert_cast(src_table_function.get()); - new_query->select = view_function->getSelectQuery().clone(); - } - else - { - const auto select_with_union_query = std::make_shared(); - select_with_union_query->list_of_selects = std::make_shared(); + if (auto * select_query = select.list_of_selects->children.at(0)->as()) + { + JoinedTables joined_tables(Context::createCopy(local_context), *select_query); - auto * select = query.select->as().list_of_selects->children.at(0)->as(); - auto new_select_query = std::dynamic_pointer_cast(select->clone()); - select_with_union_query->list_of_selects->children.push_back(new_select_query); + if (joined_tables.tablesCount() == 1) + { + storage_src = std::dynamic_pointer_cast(joined_tables.getLeftTableStorage()); + if (storage_src) + { + /// Unwrap view() function. 
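The `/// Unwrap view() function.` step that the inlined `distributedWrite` performs next has two branches: if the source `Distributed` table was declared over a `view(...)` table function, the view's inner SELECT is shipped to the shards as-is; otherwise the SELECT is cloned and its FROM clause is retargeted at the remote database and table via `replaceDatabaseAndTable`. A minimal sketch of that rewrite, using a hypothetical `Ast` type rather than ClickHouse's real `IAST`/`ASTSelectQuery` hierarchy:

```cpp
#include <memory>
#include <string>

// Hypothetical stand-in for a query AST node; the real code works with
// ASTSelectQuery / ASTSelectWithUnionQuery and TableFunctionView.
struct Ast
{
    std::string database;
    std::string table;
    std::shared_ptr<Ast> inner_select;  // non-null when the source is view(SELECT ...)

    std::shared_ptr<Ast> clone() const { return std::make_shared<Ast>(*this); }
};

// Produce the SELECT that will be sent to every shard of the destination cluster.
std::shared_ptr<Ast> rewriteSelectForShards(
    const Ast & select, const std::string & remote_database, const std::string & remote_table)
{
    if (select.inner_select)
        return select.inner_select->clone();  // unwrap view(): ship the inner query unchanged

    auto rewritten = select.clone();          // otherwise retarget FROM at the underlying table
    rewritten->database = remote_database;
    rewritten->table = remote_table;
    return rewritten;
}
```

Either branch yields a query that each shard can execute locally, which is what lets the parallel INSERT SELECT fan out without funneling data through the initiator.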
+ if (storage_src->remote_table_function_ptr) + { + const TableFunctionPtr src_table_function = + TableFunctionFactory::instance().get(storage_src->remote_table_function_ptr, local_context); + const TableFunctionView * view_function = + assert_cast(src_table_function.get()); + new_query->select = view_function->getSelectQuery().clone(); + } + else + { + const auto select_with_union_query = std::make_shared(); + select_with_union_query->list_of_selects = std::make_shared(); - new_select_query->replaceDatabaseAndTable(src_distributed.getRemoteDatabaseName(), src_distributed.getRemoteTableName()); + auto new_select_query = std::dynamic_pointer_cast(select_query->clone()); + select_with_union_query->list_of_selects->children.push_back(new_select_query); - new_query->select = select_with_union_query; + new_select_query->replaceDatabaseAndTable(storage_src->getRemoteDatabaseName(), storage_src->getRemoteTableName()); + + new_query->select = select_with_union_query; + } + } + } + } } - const Cluster::AddressesWithFailover & src_addresses = src_distributed.getCluster()->getShardsAddresses(); + const Cluster::AddressesWithFailover & src_addresses = storage_src ? storage_src->getCluster()->getShardsAddresses() : Cluster::AddressesWithFailover{}; const Cluster::AddressesWithFailover & dst_addresses = getCluster()->getShardsAddresses(); /// Compare addresses instead of cluster name, to handle remote()/cluster(). /// (since for remote()/cluster() the getClusterName() is empty string) @@ -804,7 +822,7 @@ std::optional StorageDistributed::distributedWriteBetweenDistribu LOG_WARNING(log, "Parallel distributed INSERT SELECT is not possible " "(source cluster={} ({} addresses), destination cluster={} ({} addresses))", - src_distributed.getClusterName(), + storage_src ? 
storage_src->getClusterName() : "", src_addresses.size(), getClusterName(), dst_addresses.size()); @@ -831,7 +849,6 @@ std::optional StorageDistributed::distributedWriteBetweenDistribu new_query_str = buf.str(); } - QueryPipeline pipeline; ContextMutablePtr query_context = Context::createCopy(local_context); ++query_context->getClientInfo().distributed_depth; @@ -865,114 +882,6 @@ std::optional StorageDistributed::distributedWriteBetweenDistribu } -std::optional StorageDistributed::distributedWriteFromClusterStorage(const IStorageCluster & src_storage_cluster, const ASTInsertQuery & query, ContextPtr local_context) const -{ - const auto & settings = local_context->getSettingsRef(); - auto extension = src_storage_cluster.getTaskIteratorExtension(local_context); - - auto dst_cluster = getCluster(); - - auto new_query = std::dynamic_pointer_cast(query.clone()); - if (settings.parallel_distributed_insert_select == PARALLEL_DISTRIBUTED_INSERT_SELECT_ALL) - { - new_query->table_id = StorageID(getRemoteDatabaseName(), getRemoteTableName()); - /// Reset table function for INSERT INTO remote()/cluster() - new_query->table_function.reset(); - } - - String new_query_str; - { - WriteBufferFromOwnString buf; - IAST::FormatSettings ast_format_settings(buf, /*one_line*/ true); - ast_format_settings.always_quote_identifiers = true; - new_query->IAST::format(ast_format_settings); - new_query_str = buf.str(); - } - - QueryPipeline pipeline; - ContextMutablePtr query_context = Context::createCopy(local_context); - ++query_context->getClientInfo().distributed_depth; - - /// Here we take addresses from destination cluster and assume source table exists on these nodes - for (const auto & replicas : getCluster()->getShardsAddresses()) - { - /// There will be only one replica, because we consider each replica as a shard - for (const auto & node : replicas) - { - auto connection = std::make_shared( - node.host_name, node.port, query_context->getGlobalContext()->getCurrentDatabase(), - node.user, node.password, node.quota_key, node.cluster, node.cluster_secret, - "ParallelInsertSelectInititiator", - node.compression, - node.secure - ); - - auto remote_query_executor = std::make_shared( - connection, - new_query_str, - Block{}, - query_context, - /*throttler=*/nullptr, - Scalars{}, - Tables{}, - QueryProcessingStage::Complete, - extension); - - QueryPipeline remote_pipeline(std::make_shared(remote_query_executor, false, settings.async_socket_for_remote)); - remote_pipeline.complete(std::make_shared(remote_query_executor->getHeader())); - - pipeline.addCompletedPipeline(std::move(remote_pipeline)); - } - } - - return pipeline; -} - - -std::optional StorageDistributed::distributedWrite(const ASTInsertQuery & query, ContextPtr local_context) -{ - const Settings & settings = local_context->getSettingsRef(); - if (settings.max_distributed_depth && local_context->getClientInfo().distributed_depth >= settings.max_distributed_depth) - throw Exception("Maximum distributed depth exceeded", ErrorCodes::TOO_LARGE_DISTRIBUTED_DEPTH); - - auto & select = query.select->as(); - - StoragePtr src_storage; - - if (select.list_of_selects->children.size() == 1) - { - if (auto * select_query = select.list_of_selects->children.at(0)->as()) - { - JoinedTables joined_tables(Context::createCopy(local_context), *select_query); - - if (joined_tables.tablesCount() == 1) - { - src_storage = joined_tables.getLeftTableStorage(); - } - } - } - - if (!src_storage) - return {}; - - if (auto src_distributed = std::dynamic_pointer_cast(src_storage)) - 
{ - return distributedWriteBetweenDistributedTables(*src_distributed, query, local_context); - } - else if (auto src_storage_cluster = std::dynamic_pointer_cast(src_storage)) - { - return distributedWriteFromClusterStorage(*src_storage_cluster, query, local_context); - } - else if (local_context->getClientInfo().distributed_depth == 0) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parallel distributed INSERT SELECT is not possible. "\ - "Reason: distributed reading is supported only from Distributed engine or *Cluster table functions, but got {} storage", src_storage->getName()); - } - - return {}; -} - - void StorageDistributed::checkAlterIsPossible(const AlterCommands & commands, ContextPtr local_context) const { auto name_deps = getDependentViewsByColumn(local_context); diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index 3161f4b50f6..7cb25ae46ab 100644 --- a/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include #include @@ -208,9 +207,6 @@ private: void delayInsertOrThrowIfNeeded() const; - std::optional distributedWriteFromClusterStorage(const IStorageCluster & src_storage_cluster, const ASTInsertQuery & query, ContextPtr context) const; - std::optional distributedWriteBetweenDistributedTables(const StorageDistributed & src_distributed, const ASTInsertQuery & query, ContextPtr context) const; - String remote_database; String remote_table; ASTPtr remote_table_function_ptr; diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index f4cf1fe5c58..0788a9f73d8 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -1257,7 +1257,7 @@ std::optional StorageFile::tryGetColumnsFromCache( return file_stat.st_mtime; }; - String cache_key = getKeyForSchemaCache(path, format_name, format_settings, context); + auto cache_key = getKeyForSchemaCache(path, format_name, format_settings, context); auto columns = schema_cache.tryGet(cache_key, get_last_mod_time); if (columns) return columns; @@ -1274,7 +1274,7 @@ void StorageFile::addColumnsToCache( const ContextPtr & context) { auto & schema_cache = getSchemaCache(context); - Strings cache_keys = getKeysForSchemaCache(paths, format_name, format_settings, context); + auto cache_keys = getKeysForSchemaCache(paths, format_name, format_settings, context); schema_cache.addMany(cache_keys, columns); } diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 2bedf406b7d..16af2269f61 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -528,15 +529,33 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( real_column_names.push_back(ExpressionActions::getSmallestColumn(storage_snapshot->metadata->getColumns().getAllPhysical())); QueryPlan plan; - storage->read( - plan, - real_column_names, - storage_snapshot, - modified_query_info, - modified_context, - processed_stage, - max_block_size, - UInt32(streams_num)); + if (StorageView * view = dynamic_cast(storage.get())) + { + /// For view storage, we need to rewrite the `modified_query_info.view_query` to optimize read. + /// The most intuitive way is to use InterpreterSelectQuery. 
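`storage->read()` receives `max_block_size` and the stream budget as explicit arguments, while `InterpreterSelectQuery` picks its limits up from the `Context`; hence the hunk copies the context and pins those limits as settings before building the plan ("intercepting" them, as the next comment puts it). A schematic sketch of the idea, with hypothetical `Context`, `QueryPlan` and `InterpreterSelect` stand-ins for the real classes:

```cpp
#include <cstddef>
#include <map>
#include <string>
#include <utility>

// Hypothetical simplified stand-ins for Context / InterpreterSelectQuery / QueryPlan.
struct Context
{
    std::map<std::string, size_t> settings;
    void setSetting(const std::string & name, size_t value) { settings[name] = value; }
};

struct QueryPlan {};

struct InterpreterSelect
{
    explicit InterpreterSelect(Context context_) : context(std::move(context_)) {}
    QueryPlan buildQueryPlan() const { return {}; }  // would plan the view's rewritten query
    Context context;
};

QueryPlan planViewRead(const Context & outer_context, size_t streams_num, size_t max_block_size)
{
    Context modified = outer_context;  // copy, so the outer query's context stays untouched
    modified.setSetting("max_threads", streams_num);             // cap parallelism at the stream budget
    modified.setSetting("max_streams_to_max_threads_ratio", 1);  // one stream per thread
    modified.setSetting("max_block_size", max_block_size);       // same granularity read() would get
    return InterpreterSelect(std::move(modified)).buildQueryPlan();
}
```

This mirrors what the explicit `max_block_size` and `UInt32(streams_num)` arguments achieve in the `storage->read()` branch of the same hunk.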
+ + /// Intercept the settings + modified_context->setSetting("max_threads", streams_num); + modified_context->setSetting("max_streams_to_max_threads_ratio", 1); + modified_context->setSetting("max_block_size", max_block_size); + + InterpreterSelectQuery( + modified_query_info.query, modified_context, storage, view->getInMemoryMetadataPtr(), SelectQueryOptions(processed_stage)) + .buildQueryPlan(plan); + } + else + { + storage->read( + plan, + real_column_names, + storage_snapshot, + modified_query_info, + modified_context, + processed_stage, + max_block_size, + UInt32(streams_num)); + + } if (!plan.isInitialized()) return {}; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 908372b462e..507047751f3 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -569,9 +569,25 @@ bool StorageMergeTree::hasLightweightDeletedMask() const return has_lightweight_delete_parts.load(std::memory_order_relaxed); } +namespace +{ + +struct PartVersionWithName +{ + Int64 version; + String name; +}; + +bool comparator(const PartVersionWithName & f, const PartVersionWithName & s) +{ + return f.version < s.version; +} + +} + std::optional StorageMergeTree::getIncompleteMutationsStatus(Int64 mutation_version, std::set * mutation_ids) const { - std::unique_lock lock(currently_processing_in_background_mutex); + std::lock_guard lock(currently_processing_in_background_mutex); auto current_mutation_it = current_mutations_by_version.find(mutation_version); /// Killed @@ -587,7 +603,7 @@ std::optional StorageMergeTree::getIncompleteMutationsS auto data_parts = getVisibleDataPartsVector(txn); for (const auto & data_part : data_parts) { - Int64 data_version = getUpdatedDataVersion(data_part, lock); + Int64 data_version = data_part->info.getDataVersion(); if (data_version < mutation_version) { if (!mutation_entry.latest_fail_reason.empty()) @@ -630,9 +646,14 @@ std::optional StorageMergeTree::getIncompleteMutationsS std::vector StorageMergeTree::getMutationsStatus() const { - std::unique_lock lock(currently_processing_in_background_mutex); + std::lock_guard lock(currently_processing_in_background_mutex); - auto part_versions_with_names = getSortedPartVersionsWithNames(lock); + std::vector part_versions_with_names; + auto data_parts = getDataPartsVectorForInternalUsage(); + part_versions_with_names.reserve(data_parts.size()); + for (const auto & part : data_parts) + part_versions_with_names.emplace_back(PartVersionWithName{part->info.getDataVersion(), part->name}); + std::sort(part_versions_with_names.begin(), part_versions_with_names.end(), comparator); std::vector result; for (const auto & kv : current_mutations_by_version) @@ -641,7 +662,7 @@ std::vector StorageMergeTree::getMutationsStatus() cons const MergeTreeMutationEntry & entry = kv.second; const PartVersionWithName needle{mutation_version, ""}; auto versions_it = std::lower_bound( - part_versions_with_names.begin(), part_versions_with_names.end(), needle); + part_versions_with_names.begin(), part_versions_with_names.end(), needle, comparator); size_t parts_to_do = versions_it - part_versions_with_names.begin(); Names parts_to_do_names; @@ -963,8 +984,7 @@ bool StorageMergeTree::partIsAssignedToBackgroundOperation(const DataPartPtr & p std::shared_ptr StorageMergeTree::selectPartsToMutate( const StorageMetadataPtr & metadata_snapshot, String * /* disable_reason */, TableLockHolder & /* table_lock_holder */, - std::unique_lock & currently_processing_in_background_mutex_lock, - bool & 
were_some_mutations_for_some_parts_skipped) + std::unique_lock & /*currently_processing_in_background_mutex_lock*/) { size_t max_ast_elements = getContext()->getSettingsRef().max_expanded_ast_elements; @@ -993,7 +1013,7 @@ std::shared_ptr StorageMergeTree::selectPartsToMutate( if (currently_merging_mutating_parts.contains(part)) continue; - auto mutations_begin_it = current_mutations_by_version.upper_bound(getUpdatedDataVersion(part, currently_processing_in_background_mutex_lock)); + auto mutations_begin_it = current_mutations_by_version.upper_bound(part->info.getDataVersion()); if (mutations_begin_it == mutations_end_it) continue; @@ -1080,33 +1100,6 @@ std::shared_ptr StorageMergeTree::selectPartsToMutate( assert(commands->empty() == (last_mutation_to_apply == mutations_end_it)); if (!commands->empty()) { - bool is_partition_affected = false; - for (const auto & command : *commands) - { - if (command.partition == nullptr) - { - is_partition_affected = true; - break; - } - - const String partition_id = part->storage.getPartitionIDFromQuery(command.partition, getContext()); - if (partition_id == part->info.partition_id) - { - is_partition_affected = true; - break; - } - } - - if (!is_partition_affected) - { - /// Shall not create a new part, but will do that later if mutation with higher version appear. - /// This is needed in order to not produce excessive mutations of non-related parts. - auto block_range = std::make_pair(part->info.min_block, part->info.max_block); - updated_version_by_block_range[block_range] = last_mutation_to_apply->first; - were_some_mutations_for_some_parts_skipped = true; - continue; - } - auto new_part_info = part->info; new_part_info.mutation = last_mutation_to_apply->first; @@ -1133,7 +1126,6 @@ bool StorageMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assign auto metadata_snapshot = getInMemoryMetadataPtr(); std::shared_ptr merge_entry, mutate_entry; - bool were_some_mutations_skipped = false; auto share_lock = lockForShare(RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); @@ -1154,19 +1146,11 @@ bool StorageMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assign merge_entry = selectPartsToMerge(metadata_snapshot, false, {}, false, nullptr, share_lock, lock, txn); if (!merge_entry) - mutate_entry = selectPartsToMutate(metadata_snapshot, nullptr, share_lock, lock, were_some_mutations_skipped); + mutate_entry = selectPartsToMutate(metadata_snapshot, nullptr, share_lock, lock); has_mutations = !current_mutations_by_version.empty(); } - if ((!mutate_entry && has_mutations) || were_some_mutations_skipped) - { - /// Notify in case of errors or if some mutation was skipped (because it has no effect on the part). - /// TODO @azat: we can also spot some selection errors when `mutate_entry` is true. 
- std::lock_guard lock(mutation_wait_mutex); - mutation_wait_event.notify_all(); - } - if (merge_entry) { auto task = std::make_shared(*this, metadata_snapshot, false, Names{}, merge_entry, share_lock, common_assignee_trigger); @@ -1228,22 +1212,11 @@ bool StorageMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assign return scheduled; } -Int64 StorageMergeTree::getUpdatedDataVersion( - const DataPartPtr & part, - std::unique_lock & /* currently_processing_in_background_mutex_lock */) const -{ - auto it = updated_version_by_block_range.find(std::make_pair(part->info.min_block, part->info.max_block)); - if (it != updated_version_by_block_range.end()) - return std::max(part->info.getDataVersion(), static_cast(it->second)); - else - return part->info.getDataVersion(); -} - UInt64 StorageMergeTree::getCurrentMutationVersion( const DataPartPtr & part, - std::unique_lock & currently_processing_in_background_mutex_lock) const + std::unique_lock & /*currently_processing_in_background_mutex_lock*/) const { - auto it = current_mutations_by_version.upper_bound(getUpdatedDataVersion(part, currently_processing_in_background_mutex_lock)); + auto it = current_mutations_by_version.upper_bound(part->info.getDataVersion()); if (it == current_mutations_by_version.begin()) return 0; --it; @@ -1256,7 +1229,7 @@ size_t StorageMergeTree::clearOldMutations(bool truncate) std::vector mutations_to_delete; { - std::unique_lock lock(currently_processing_in_background_mutex); + std::lock_guard lock(currently_processing_in_background_mutex); if (current_mutations_by_version.size() <= finished_mutations_to_keep) return 0; @@ -1268,18 +1241,13 @@ size_t StorageMergeTree::clearOldMutations(bool truncate) end_it = current_mutations_by_version.upper_bound(*min_version); size_t done_count = std::distance(begin_it, end_it); + if (done_count <= finished_mutations_to_keep) return 0; - auto part_versions_with_names = getSortedPartVersionsWithNames(lock); - for (auto it = begin_it; it != end_it; ++it) { - const PartVersionWithName needle{static_cast(it->first), ""}; - auto versions_it = std::lower_bound( - part_versions_with_names.begin(), part_versions_with_names.end(), needle); - - if (versions_it != part_versions_with_names.begin() || !it->second.tid.isPrehistoric()) + if (!it->second.tid.isPrehistoric()) { done_count = std::distance(begin_it, it); break; @@ -1312,21 +1280,6 @@ size_t StorageMergeTree::clearOldMutations(bool truncate) return mutations_to_delete.size(); } -std::vector StorageMergeTree::getSortedPartVersionsWithNames( - std::unique_lock & currently_processing_in_background_mutex_lock) const -{ - std::vector part_versions_with_names; - auto data_parts = getDataPartsVectorForInternalUsage(); - part_versions_with_names.reserve(data_parts.size()); - for (const auto & part : data_parts) - part_versions_with_names.emplace_back(PartVersionWithName{ - getUpdatedDataVersion(part, currently_processing_in_background_mutex_lock), - part->name - }); - ::sort(part_versions_with_names.begin(), part_versions_with_names.end()); - return part_versions_with_names; -} - bool StorageMergeTree::optimize( const ASTPtr & /*query*/, const StorageMetadataPtr & /*metadata_snapshot*/, @@ -1868,9 +1821,9 @@ void StorageMergeTree::attachRestoredParts(MutableDataPartsVector && parts) MutationCommands StorageMergeTree::getFirstAlterMutationCommandsForPart(const DataPartPtr & part) const { - std::unique_lock lock(currently_processing_in_background_mutex); + std::lock_guard lock(currently_processing_in_background_mutex); - auto it = 
current_mutations_by_version.upper_bound(getUpdatedDataVersion(part, lock)); + auto it = current_mutations_by_version.upper_bound(part->info.getDataVersion()); if (it == current_mutations_by_version.end()) return {}; return it->second.commands; diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index 632884db033..b36850f9f4a 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -147,11 +147,6 @@ private: std::map current_mutations_by_version; - /// We store information about mutations which are not applicable to the partition of each part. - /// The value is a maximum version for a part which will be the same as his current version, - /// that is, to which version it can be upgraded without any change. - std::map, UInt64> updated_version_by_block_range; - std::atomic shutdown_called {false}; std::atomic flush_called {false}; @@ -192,17 +187,6 @@ private: friend struct CurrentlyMergingPartsTagger; - struct PartVersionWithName - { - Int64 version; - String name; - - bool operator <(const PartVersionWithName & s) const - { - return version < s.version; - } - }; - std::shared_ptr selectPartsToMerge( const StorageMetadataPtr & metadata_snapshot, bool aggressive, @@ -216,7 +200,9 @@ private: SelectPartsDecision * select_decision_out = nullptr); - std::shared_ptr selectPartsToMutate(const StorageMetadataPtr & metadata_snapshot, String * disable_reason, TableLockHolder & table_lock_holder, std::unique_lock & currently_processing_in_background_mutex_lock, bool & were_some_mutations_for_some_parts_skipped); + std::shared_ptr selectPartsToMutate( + const StorageMetadataPtr & metadata_snapshot, String * disable_reason, + TableLockHolder & table_lock_holder, std::unique_lock & currently_processing_in_background_mutex_lock); /// For current mutations queue, returns maximum version of mutation for a part, /// with respect of mutations which would not change it. @@ -225,15 +211,8 @@ private: const DataPartPtr & part, std::unique_lock & /* currently_processing_in_background_mutex_lock */) const; - /// Returns maximum version of a part, with respect of mutations which would not change it. 
- Int64 getUpdatedDataVersion( - const DataPartPtr & part, - std::unique_lock & /* currently_processing_in_background_mutex_lock */) const; - size_t clearOldMutations(bool truncate = false); - std::vector getSortedPartVersionsWithNames(std::unique_lock & /* currently_processing_in_background_mutex_lock */) const; - // Partition helpers void dropPartNoWaitNoThrow(const String & part_name) override; void dropPart(const String & part_name, bool detach, ContextPtr context) override; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 393cdf76c7a..762c3d52627 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -45,13 +46,11 @@ #include #include -#include #include #include #include #include #include -#include #include #include #include @@ -61,7 +60,6 @@ #include #include #include -#include #include #include @@ -76,7 +74,6 @@ #include #include #include -#include #include #include @@ -163,7 +160,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int CONCURRENT_ACCESS_NOT_SUPPORTED; extern const int CHECKSUM_DOESNT_MATCH; - extern const int TOO_LARGE_DISTRIBUTED_DEPTH; + extern const int NOT_INITIALIZED; } namespace ActionLocks @@ -301,7 +298,8 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( mutations_finalizing_task = getContext()->getSchedulePool().createTask( getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::mutationsFinalizingTask)", [this] { mutationsFinalizingTask(); }); - if (getContext()->hasZooKeeper() || getContext()->hasAuxiliaryZooKeeper(zookeeper_name)) + bool has_zookeeper = getContext()->hasZooKeeper() || getContext()->hasAuxiliaryZooKeeper(zookeeper_name); + if (has_zookeeper) { /// It's possible for getZooKeeper() to timeout if zookeeper host(s) can't /// be reached. In such cases Poco::Exception is thrown after a connection @@ -330,8 +328,14 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( catch (...) { if (!attach) + { dropIfEmpty(); - throw; + throw; + } + else + { + current_zookeeper = nullptr; + } } } @@ -339,6 +343,13 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( /// It does not make sense for CREATE query if (attach) { + if (current_zookeeper && current_zookeeper->exists(replica_path + "/host")) + { + /// Check it earlier if we can (we don't want incompatible version to start). + /// If "/host" doesn't exist, then replica is probably dropped and there's nothing to check. + ReplicatedMergeTreeAttachThread::checkHasReplicaMetadataInZooKeeper(current_zookeeper, replica_path); + } + if (current_zookeeper && current_zookeeper->exists(replica_path + "/flags/force_restore_data")) { skip_sanity_checks = true; @@ -364,124 +375,77 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( throw Exception("Can't create replicated table without ZooKeeper", ErrorCodes::NO_ZOOKEEPER); } - /// Do not activate the replica. It will be readonly. - LOG_ERROR(log, "No ZooKeeper: table will be in readonly mode."); has_metadata_in_zookeeper = std::nullopt; - return; + + if (!has_zookeeper) + { + /// Do not activate the replica. It will be readonly. 
+ LOG_ERROR(log, "No ZooKeeper defined: table will stay in readonly mode."); + return; + } } - if (attach && !current_zookeeper->exists(zookeeper_path + "/metadata")) + if (attach) { - LOG_WARNING(log, "No metadata in ZooKeeper for {}: table will be in readonly mode.", zookeeper_path); - has_metadata_in_zookeeper = false; + LOG_INFO(log, "Table will be in readonly mode until initialization is finished"); + attach_thread.emplace(*this); + attach_thread->setSkipSanityChecks(skip_sanity_checks); return; } auto metadata_snapshot = getInMemoryMetadataPtr(); - /// May it be ZK lost not the whole root, so the upper check passed, but only the /replicas/replica - /// folder. - if (attach && !current_zookeeper->exists(replica_path)) - { - LOG_WARNING(log, "No metadata in ZooKeeper for {}: table will be in readonly mode", replica_path); - has_metadata_in_zookeeper = false; - return; - } - has_metadata_in_zookeeper = true; - if (!attach) + if (!getDataPartsForInternalUsage().empty()) + throw Exception("Data directory for table already contains data parts" + " - probably it was unclean DROP table or manual intervention." + " You must either clear directory by hand or use ATTACH TABLE" + " instead of CREATE TABLE if you need to use that parts.", ErrorCodes::INCORRECT_DATA); + + try { - if (!getDataPartsForInternalUsage().empty()) - throw Exception("Data directory for table already contains data parts" - " - probably it was unclean DROP table or manual intervention." - " You must either clear directory by hand or use ATTACH TABLE" - " instead of CREATE TABLE if you need to use that parts.", ErrorCodes::INCORRECT_DATA); + bool is_first_replica = createTableIfNotExists(metadata_snapshot); try { - bool is_first_replica = createTableIfNotExists(metadata_snapshot); + /// NOTE If it's the first replica, these requests to ZooKeeper look redundant, we already know everything. - try - { - /// NOTE If it's the first replica, these requests to ZooKeeper look redundant, we already know everything. - - /// We have to check granularity on other replicas. If it's fixed we - /// must create our new replica with fixed granularity and store this - /// information in /replica/metadata. - other_replicas_fixed_granularity = checkFixedGranularityInZookeeper(); - - checkTableStructure(zookeeper_path, metadata_snapshot); - - Coordination::Stat metadata_stat; - current_zookeeper->get(zookeeper_path + "/metadata", &metadata_stat); - metadata_version = metadata_stat.version; - } - catch (Coordination::Exception & e) - { - if (!is_first_replica && e.code == Coordination::Error::ZNONODE) - throw Exception("Table " + zookeeper_path + " was suddenly removed.", ErrorCodes::ALL_REPLICAS_LOST); - else - throw; - } - - if (!is_first_replica) - createReplica(metadata_snapshot); - } - catch (...) - { - /// If replica was not created, rollback creation of data directory. - dropIfEmpty(); - throw; - } - } - else - { - /// In old tables this node may missing or be empty - String replica_metadata; - const bool replica_metadata_exists = current_zookeeper->tryGet(replica_path + "/metadata", replica_metadata); - - if (!replica_metadata_exists || replica_metadata.empty()) - { - /// We have to check shared node granularity before we create ours. + /// We have to check granularity on other replicas. If it's fixed we + /// must create our new replica with fixed granularity and store this + /// information in /replica/metadata. 
other_replicas_fixed_granularity = checkFixedGranularityInZookeeper(); - ReplicatedMergeTreeTableMetadata current_metadata(*this, metadata_snapshot); + checkTableStructure(zookeeper_path, metadata_snapshot); - current_zookeeper->createOrUpdate(replica_path + "/metadata", current_metadata.toString(), - zkutil::CreateMode::Persistent); - } - - checkTableStructure(replica_path, metadata_snapshot); - checkParts(skip_sanity_checks); - - if (current_zookeeper->exists(replica_path + "/metadata_version")) - { - metadata_version = parse(current_zookeeper->get(replica_path + "/metadata_version")); - } - else - { - /// This replica was created with old clickhouse version, so we have - /// to take version of global node. If somebody will alter our - /// table, then we will fill /metadata_version node in zookeeper. - /// Otherwise on the next restart we can again use version from - /// shared metadata node because it was not changed. Coordination::Stat metadata_stat; current_zookeeper->get(zookeeper_path + "/metadata", &metadata_stat); metadata_version = metadata_stat.version; } - /// Temporary directories contain uninitialized results of Merges or Fetches (after forced restart), - /// don't allow to reinitialize them, delete each of them immediately. - clearOldTemporaryDirectories(0, {"tmp_", "delete_tmp_", "tmp-fetch_"}); - clearOldWriteAheadLogs(); - if (getSettings()->merge_tree_enable_clear_old_broken_detached) - clearOldBrokenPartsFromDetachedDirecory(); + catch (Coordination::Exception & e) + { + if (!is_first_replica && e.code == Coordination::Error::ZNONODE) + throw Exception("Table " + zookeeper_path + " was suddenly removed.", ErrorCodes::ALL_REPLICAS_LOST); + else + throw; + } + + if (!is_first_replica) + createReplica(metadata_snapshot); + } + catch (...) + { + /// If replica was not created, rollback creation of data directory. + dropIfEmpty(); + throw; } createNewZooKeeperNodes(); syncPinnedPartUUIDs(); createTableSharedID(); + + initialization_done = true; } @@ -879,7 +843,6 @@ void StorageReplicatedMergeTree::drop() if (!zookeeper) throw Exception("Can't drop readonly replicated table (need to drop data in ZooKeeper as well)", ErrorCodes::TABLE_IS_READ_ONLY); - shutdown(); dropReplica(zookeeper, zookeeper_path, replica_name, log, getSettings()); } @@ -907,20 +870,10 @@ void StorageReplicatedMergeTree::dropReplica(zkutil::ZooKeeperPtr zookeeper, con return; } - /// Analog of removeRecursive(remote_replica_path) - /// but it removes "metadata" firstly. - /// - /// This will allow to mark table as readonly - /// and skip any checks of parts between on-disk and in the zookeeper. 
- /// - /// Without this removeRecursive() may remove "parts" first - /// and on DETACH/ATTACH (or server restart) it will trigger the following error: - /// - /// "The local set of parts of table X doesn't look like the set of parts in ZooKeeper" - /// { - /// Remove metadata first - [[maybe_unused]] auto code = zookeeper->tryRemove(fs::path(remote_replica_path) / "metadata"); + /// Remove "host" node first to mark replica as dropped (the choice is arbitrary, + /// it could be any node without children that exists since ancient server versions and not re-created on startup) + [[maybe_unused]] auto code = zookeeper->tryRemove(fs::path(remote_replica_path) / "host"); assert(code == Coordination::Error::ZOK || code == Coordination::Error::ZNONODE); /// Then try to remove paths that are known to be flat (all children are leafs) @@ -941,11 +894,11 @@ void StorageReplicatedMergeTree::dropReplica(zkutil::ZooKeeperPtr zookeeper, con ops.emplace_back(zkutil::makeRemoveRequest(remote_replica_path + "/" + node, -1)); ops.emplace_back(zkutil::makeRemoveRequest(remote_replica_path + "/columns", -1)); - ops.emplace_back(zkutil::makeRemoveRequest(remote_replica_path + "/host", -1)); ops.emplace_back(zkutil::makeRemoveRequest(remote_replica_path + "/is_lost", -1)); ops.emplace_back(zkutil::makeRemoveRequest(remote_replica_path + "/log_pointer", -1)); ops.emplace_back(zkutil::makeRemoveRequest(remote_replica_path + "/max_processed_insert_time", -1)); ops.emplace_back(zkutil::makeRemoveRequest(remote_replica_path + "/min_unprocessed_insert_time", -1)); + ops.emplace_back(zkutil::makeRemoveRequest(remote_replica_path + "/metadata", -1)); ops.emplace_back(zkutil::makeRemoveRequest(remote_replica_path + "/metadata_version", -1)); ops.emplace_back(zkutil::makeRemoveRequest(remote_replica_path + "/mutation_pointer", -1)); Coordination::Responses res; @@ -3486,13 +3439,15 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n void StorageReplicatedMergeTree::startBeingLeader() { + auto zookeeper = getZooKeeper(); + if (!getSettings()->replicated_can_become_leader) { LOG_INFO(log, "Will not enter leader election because replicated_can_become_leader=0"); return; } - zkutil::checkNoOldLeaders(log, *current_zookeeper, fs::path(zookeeper_path) / "leader_election"); + zkutil::checkNoOldLeaders(log, *zookeeper, fs::path(zookeeper_path) / "leader_election"); LOG_INFO(log, "Became leader"); is_leader = true; @@ -4191,8 +4146,19 @@ DataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( return part->data_part_storage; } - void StorageReplicatedMergeTree::startup() +{ + if (attach_thread) + { + attach_thread->start(); + attach_thread->waitFirstTry(); + return; + } + + startupImpl(); +} + +void StorageReplicatedMergeTree::startupImpl() { /// Do not start replication if ZooKeeper is not configured or there is no metadata in zookeeper if (!has_metadata_in_zookeeper.has_value() || !*has_metadata_in_zookeeper) @@ -4200,6 +4166,7 @@ void StorageReplicatedMergeTree::startup() try { + auto zookeeper = getZooKeeper(); InterserverIOEndpointPtr data_parts_exchange_ptr = std::make_shared(*this); [[maybe_unused]] auto prev_ptr = std::atomic_exchange(&data_parts_exchange_endpoint, data_parts_exchange_ptr); assert(prev_ptr == nullptr); @@ -4259,6 +4226,8 @@ void StorageReplicatedMergeTree::shutdown() mutations_finalizing_task->deactivate(); stopBeingLeader(); + if (attach_thread) + attach_thread->shutdown(); restarting_thread.shutdown(); background_operations_assignee.finish(); 
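The `attach_thread->shutdown()` call above and the `startup()`/`startupImpl()` split in the preceding hunk implement non-blocking ATTACH: the table comes up readonly, and `startup()` waits only for the attach thread's first initialization attempt rather than for its success. A condensed, self-contained model of that contract, with hypothetical internals (only `start()`, `waitFirstTry()` and `shutdown()` are visible in the diff; everything else here is assumed):

```cpp
#include <condition_variable>
#include <mutex>
#include <thread>

// Hypothetical condensed model of ReplicatedMergeTreeAttachThread's contract as used
// above: start() launches background initialization, waitFirstTry() returns after the
// first attempt (whether or not it succeeded), shutdown() stops the thread.
class AttachThreadSketch
{
public:
    ~AttachThreadSketch() { shutdown(); }

    void start() { worker = std::thread([this] { runFirstTry(); }); }

    /// startup() waits only for the first attempt, so a server whose ZooKeeper is
    /// unreachable still finishes startup and the table simply stays readonly.
    void waitFirstTry()
    {
        std::unique_lock lock(mutex);
        cv.wait(lock, [this] { return first_try_done; });
    }

    void shutdown()
    {
        if (worker.joinable())
            worker.join();
    }

private:
    void runFirstTry()
    {
        // The real thread would verify replica metadata in ZooKeeper and finish table
        // initialization on success; on failure the table stays readonly for retry.
        {
            std::lock_guard lock(mutex);
            first_try_done = true;
        }
        cv.notify_all();
    }

    std::thread worker;
    std::mutex mutex;
    std::condition_variable cv;
    bool first_try_done = false;
};
```

This is also why, later in the diff, `restoreMetadataInZooKeeper()` refuses to run before `initialization_done` and calls `startupImpl()` directly, presumably to avoid racing the attach thread.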
part_moves_between_shards_orchestrator.shutdown(); @@ -4472,106 +4441,6 @@ SinkToStoragePtr StorageReplicatedMergeTree::write(const ASTPtr & /*query*/, con } -std::optional StorageReplicatedMergeTree::distributedWriteFromClusterStorage(const std::shared_ptr & src_storage_cluster, const ASTInsertQuery & query, ContextPtr local_context) -{ - const auto & settings = local_context->getSettingsRef(); - auto extension = src_storage_cluster->getTaskIteratorExtension(local_context); - - /// Here we won't check that the cluster formed from table replicas is a subset of a cluster specified in s3Cluster/hdfsCluster table function - auto src_cluster = src_storage_cluster->getCluster(local_context); - - /// Actually the query doesn't change, we just serialize it to string - String query_str; - { - WriteBufferFromOwnString buf; - IAST::FormatSettings ast_format_settings(buf, /*one_line*/ true); - ast_format_settings.always_quote_identifiers = true; - query.IAST::format(ast_format_settings); - query_str = buf.str(); - } - - QueryPipeline pipeline; - ContextMutablePtr query_context = Context::createCopy(local_context); - ++query_context->getClientInfo().distributed_depth; - - for (const auto & replicas : src_cluster->getShardsAddresses()) - { - /// There will be only one replica, because we consider each replica as a shard - for (const auto & node : replicas) - { - auto connection = std::make_shared( - node.host_name, node.port, query_context->getGlobalContext()->getCurrentDatabase(), - node.user, node.password, node.quota_key, node.cluster, node.cluster_secret, - "ParallelInsertSelectInititiator", - node.compression, - node.secure - ); - - auto remote_query_executor = std::make_shared( - connection, - query_str, - Block{}, - query_context, - /*throttler=*/nullptr, - Scalars{}, - Tables{}, - QueryProcessingStage::Complete, - extension); - - QueryPipeline remote_pipeline(std::make_shared(remote_query_executor, false, settings.async_socket_for_remote)); - remote_pipeline.complete(std::make_shared(remote_query_executor->getHeader())); - - pipeline.addCompletedPipeline(std::move(remote_pipeline)); - } - } - - return pipeline; -} - -std::optional StorageReplicatedMergeTree::distributedWrite(const ASTInsertQuery & query, ContextPtr local_context) -{ - /// Do not enable parallel distributed INSERT SELECT in case when query probably comes from another server - if (local_context->getClientInfo().query_kind != ClientInfo::QueryKind::INITIAL_QUERY) - return {}; - - const Settings & settings = local_context->getSettingsRef(); - if (settings.max_distributed_depth && local_context->getClientInfo().distributed_depth >= settings.max_distributed_depth) - throw Exception("Maximum distributed depth exceeded", ErrorCodes::TOO_LARGE_DISTRIBUTED_DEPTH); - - auto & select = query.select->as(); - - StoragePtr src_storage; - - if (select.list_of_selects->children.size() == 1) - { - if (auto * select_query = select.list_of_selects->children.at(0)->as()) - { - JoinedTables joined_tables(Context::createCopy(local_context), *select_query); - - if (joined_tables.tablesCount() == 1) - { - src_storage = joined_tables.getLeftTableStorage(); - } - } - } - - if (!src_storage) - return {}; - - if (auto src_distributed = std::dynamic_pointer_cast(src_storage)) - { - return distributedWriteFromClusterStorage(src_distributed, query, local_context); - } - else if (local_context->getClientInfo().distributed_depth == 0) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parallel distributed INSERT SELECT is not possible. 
Reason: distributed "\ - "reading into Replicated table is supported only from *Cluster table functions, but got {} storage", src_storage->getName()); - } - - return {}; -} - - bool StorageReplicatedMergeTree::optimize( const ASTPtr &, const StorageMetadataPtr &, @@ -5098,9 +4967,14 @@ bool StorageReplicatedMergeTree::getFakePartCoveringAllPartsInPartition(const St void StorageReplicatedMergeTree::restoreMetadataInZooKeeper() { LOG_INFO(log, "Restoring replica metadata"); + + if (!initialization_done) + throw Exception(ErrorCodes::NOT_INITIALIZED, "Table is not initialized yet"); + if (!is_readonly) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica must be readonly"); + if (getZooKeeper()->exists(replica_path)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica path is present at {} - nothing to restore. " @@ -5152,7 +5026,7 @@ void StorageReplicatedMergeTree::restoreMetadataInZooKeeper() LOG_INFO(log, "Attached all partitions, starting table"); - startup(); + startupImpl(); } void StorageReplicatedMergeTree::dropPartNoWaitNoThrow(const String & part_name) @@ -7544,7 +7418,7 @@ void StorageReplicatedMergeTree::createTableSharedID() if (table_shared_id != UUIDHelpers::Nil) throw Exception(ErrorCodes::LOGICAL_ERROR, "Table shared id already initialized"); - zkutil::ZooKeeperPtr zookeeper = getZooKeeper(); + auto zookeeper = getZooKeeper(); String zookeeper_table_id_path = fs::path(zookeeper_path) / "table_shared_id"; String id; if (!zookeeper->tryGet(zookeeper_table_id_path, id)) @@ -7605,7 +7479,7 @@ void StorageReplicatedMergeTree::lockSharedDataTemporary(const String & part_nam String id = part_id; boost::replace_all(id, "/", "_"); - Strings zc_zookeeper_paths = getZeroCopyPartPath(*getSettings(), toString(disk->getType()), getTableSharedID(), + Strings zc_zookeeper_paths = getZeroCopyPartPath(*getSettings(), toString(disk->getDataSourceDescription().type), getTableSharedID(), part_name, zookeeper_path); for (const auto & zc_zookeeper_path : zc_zookeeper_paths) @@ -7795,11 +7669,11 @@ DataPartStoragePtr StorageReplicatedMergeTree::tryToFetchIfShared( const String & path) { const auto settings = getSettings(); - auto disk_type = disk->getType(); + auto data_source_description = disk->getDataSourceDescription(); if (!(disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication)) return nullptr; - String replica = getSharedDataReplica(part, disk_type); + String replica = getSharedDataReplica(part, data_source_description.type); /// We can't fetch part when none replicas have this part on a same type remote disk if (replica.empty()) @@ -7808,9 +7682,8 @@ DataPartStoragePtr StorageReplicatedMergeTree::tryToFetchIfShared( return executeFetchShared(replica, part.name, disk, path); } - String StorageReplicatedMergeTree::getSharedDataReplica( - const IMergeTreeDataPart & part, DiskType disk_type) const + const IMergeTreeDataPart & part, DataSourceType data_source_type) const { String best_replica; @@ -7818,7 +7691,7 @@ String StorageReplicatedMergeTree::getSharedDataReplica( if (!zookeeper) return ""; - Strings zc_zookeeper_paths = getZeroCopyPartPath(*getSettings(), toString(disk_type), getTableSharedID(), part.name, + Strings zc_zookeeper_paths = getZeroCopyPartPath(*getSettings(), toString(data_source_type), getTableSharedID(), part.name, zookeeper_path); std::set replicas; @@ -7929,7 +7802,7 @@ std::optional StorageReplicatedMergeTree::getZeroCopyPartPath(const Stri if (!disk || !disk->supportZeroCopyReplication()) return std::nullopt; - return 
getZeroCopyPartPath(*getSettings(), toString(disk->getType()), getTableSharedID(), part_name, zookeeper_path)[0]; + return getZeroCopyPartPath(*getSettings(), toString(disk->getDataSourceDescription().type), getTableSharedID(), part_name, zookeeper_path)[0]; } std::optional StorageReplicatedMergeTree::tryCreateZeroCopyExclusiveLock(const String & part_name, const DiskPtr & disk) @@ -8334,7 +8207,7 @@ bool StorageReplicatedMergeTree::removeSharedDetachedPart(DiskPtr disk, const St String id = disk->getUniqueId(checksums); bool can_remove = false; std::tie(can_remove, files_not_to_remove) = StorageReplicatedMergeTree::unlockSharedDataByID(id, table_uuid, part_name, - detached_replica_name, toString(disk->getType()), zookeeper, local_context->getReplicatedMergeTreeSettings(), &Poco::Logger::get("StorageReplicatedMergeTree"), + detached_replica_name, toString(disk->getDataSourceDescription().type), zookeeper, local_context->getReplicatedMergeTreeSettings(), &Poco::Logger::get("StorageReplicatedMergeTree"), detached_zookeeper_path); keep_shared = !can_remove; diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 3f03fb70f7a..2e2a5ca79b7 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include @@ -14,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -137,8 +138,6 @@ public: SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; - std::optional distributedWrite(const ASTInsertQuery & /*query*/, ContextPtr /*context*/) override; - bool optimize( const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, @@ -286,7 +285,7 @@ public: DataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override; /// Get best replica having this partition on a same type remote disk - String getSharedDataReplica(const IMergeTreeDataPart & part, DiskType disk_type) const; + String getSharedDataReplica(const IMergeTreeDataPart & part, DataSourceType data_source_type) const; inline String getReplicaName() const { return replica_name; } @@ -342,6 +341,7 @@ private: friend class ReplicatedMergeTreeCleanupThread; friend class ReplicatedMergeTreeAlterThread; friend class ReplicatedMergeTreeRestartingThread; + friend class ReplicatedMergeTreeAttachThread; friend class ReplicatedMergeTreeMergeStrategyPicker; friend struct ReplicatedMergeTreeLogEntry; friend class ScopedPartitionMergeLock; @@ -447,8 +447,13 @@ private: /// A thread that processes reconnection to ZooKeeper when the session expires. 
ReplicatedMergeTreeRestartingThread restarting_thread; + /// A thread that attaches the table using ZooKeeper + std::optional attach_thread; + PartMovesBetweenShardsOrchestrator part_moves_between_shards_orchestrator; + std::atomic initialization_done{false}; + /// True if replica was created for existing table with fixed granularity bool other_replicas_fixed_granularity = false; @@ -468,8 +473,6 @@ private: std::mutex last_broken_disks_mutex; std::set last_broken_disks; - static std::optional distributedWriteFromClusterStorage(const std::shared_ptr & src_storage_cluster, const ASTInsertQuery & query, ContextPtr context); - template void foreachActiveParts(Func && func, bool select_sequential_consistency) const; @@ -840,6 +843,8 @@ private: /// Create ephemeral lock in zookeeper for part and disk which support zero copy replication. /// If somebody already holding the lock -- return std::nullopt. std::optional tryCreateZeroCopyExclusiveLock(const String & part_name, const DiskPtr & disk) override; + + void startupImpl(); }; String getPartNamePossiblyFake(MergeTreeDataFormatVersion format_version, const MergeTreePartInfo & part_info); diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index fea45dfc8fd..1685de55b6e 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -1389,7 +1389,7 @@ std::optional StorageS3::tryGetColumnsFromCache( }; String source = fs::path(s3_configuration.uri.uri.getHost() + std::to_string(s3_configuration.uri.uri.getPort())) / path; - String cache_key = getKeyForSchemaCache(source, format_name, format_settings, ctx); + auto cache_key = getKeyForSchemaCache(source, format_name, format_settings, ctx); auto columns = schema_cache.tryGet(cache_key, get_last_mod_time); if (columns) return columns; @@ -1410,7 +1410,7 @@ void StorageS3::addColumnsToCache( Strings sources; sources.reserve(keys.size()); std::transform(keys.begin(), keys.end(), std::back_inserter(sources), [&](const String & key){ return host_and_bucket / key; }); - Strings cache_keys = getKeysForSchemaCache(sources, format_name, format_settings, ctx); + auto cache_keys = getKeysForSchemaCache(sources, format_name, format_settings, ctx); auto & schema_cache = getSchemaCache(ctx); schema_cache.addMany(cache_keys, columns); } diff --git a/src/Storages/StorageS3Cluster.cpp b/src/Storages/StorageS3Cluster.cpp index 0c5e69cb906..a3f368effa7 100644 --- a/src/Storages/StorageS3Cluster.cpp +++ b/src/Storages/StorageS3Cluster.cpp @@ -56,7 +56,7 @@ StorageS3Cluster::StorageS3Cluster( const ConstraintsDescription & constraints_, ContextPtr context_, const String & compression_method_) - : IStorageCluster(table_id_) + : IStorage(table_id_) , s3_configuration{S3::URI{Poco::URI{filename_}}, access_key_id_, secret_access_key_, {}, {}, S3Settings::ReadWriteSettings(context_->getSettingsRef())} , filename(filename_) , cluster_name(cluster_name_) @@ -105,7 +105,12 @@ Pipe StorageS3Cluster::read( unsigned /*num_streams*/) { StorageS3::updateS3Configuration(context, s3_configuration); - createIteratorAndCallback(query_info.query, context); + + auto cluster = context->getCluster(cluster_name)->getClusterWithReplicasAsShards(context->getSettingsRef()); + + auto iterator = std::make_shared( + *s3_configuration.client, s3_configuration.uri, query_info.query, virtual_block, context); + auto callback = std::make_shared([iterator]() mutable -> String { return iterator->next(); }); /// Calculate the header. 
This is significant, because some columns could be thrown away in some cases like query with count(*) Block header = @@ -165,29 +170,6 @@ QueryProcessingStage::Enum StorageS3Cluster::getQueryProcessingStage( } -void StorageS3Cluster::createIteratorAndCallback(ASTPtr query, ContextPtr context) const -{ - cluster = context->getCluster(cluster_name)->getClusterWithReplicasAsShards(context->getSettingsRef()); - iterator = std::make_shared( - *s3_configuration.client, s3_configuration.uri, query, virtual_block, context); - callback = std::make_shared([iter = this->iterator]() mutable -> String { return iter->next(); }); -} - - -RemoteQueryExecutor::Extension StorageS3Cluster::getTaskIteratorExtension(ContextPtr context) const -{ - createIteratorAndCallback(/*query=*/nullptr, context); - return RemoteQueryExecutor::Extension{.task_iterator = callback}; -} - - -ClusterPtr StorageS3Cluster::getCluster(ContextPtr context) const -{ - createIteratorAndCallback(/*query=*/nullptr, context); - return cluster; -} - - NamesAndTypesList StorageS3Cluster::getVirtuals() const { return virtual_columns; diff --git a/src/Storages/StorageS3Cluster.h b/src/Storages/StorageS3Cluster.h index e18c33e79da..f823d1fdf04 100644 --- a/src/Storages/StorageS3Cluster.h +++ b/src/Storages/StorageS3Cluster.h @@ -10,7 +10,6 @@ #include "Client/Connection.h" #include #include -#include #include namespace DB @@ -18,7 +17,7 @@ namespace DB class Context; -class StorageS3Cluster : public IStorageCluster +class StorageS3Cluster : public IStorage { public: StorageS3Cluster( @@ -43,22 +42,15 @@ public: NamesAndTypesList getVirtuals() const override; - RemoteQueryExecutor::Extension getTaskIteratorExtension(ContextPtr context) const override; - ClusterPtr getCluster(ContextPtr context) const override; private: StorageS3::S3Configuration s3_configuration; + String filename; String cluster_name; String format_name; String compression_method; NamesAndTypesList virtual_columns; Block virtual_block; - - mutable ClusterPtr cluster; - mutable std::shared_ptr iterator; - mutable std::shared_ptr callback; - - void createIteratorAndCallback(ASTPtr query, ContextPtr context) const; }; diff --git a/src/Storages/StorageS3Settings.cpp b/src/Storages/StorageS3Settings.cpp index 6c979d69795..353e324c853 100644 --- a/src/Storages/StorageS3Settings.cpp +++ b/src/Storages/StorageS3Settings.cpp @@ -34,6 +34,13 @@ void StorageS3Settings::loadFromConfig(const String & config_elem, const Poco::U return with_default ? config.getUInt64(config_elem + "." + key + "." + elem, default_value) : config.getUInt64(config_elem + "." + key + "." + elem); }; + + auto get_bool_for_key = [&](const String & key, const String & elem, bool with_default = true, bool default_value = false) + { + return with_default ? config.getBool(config_elem + "." + key + "." + elem, default_value) : config.getBool(config_elem + "." + key + "." + elem); + }; + + for (const String & key : config_keys) { if (config.has(config_elem + "." 
+ key + ".endpoint")) @@ -82,6 +89,7 @@ void StorageS3Settings::loadFromConfig(const String & config_elem, const Poco::U rw_settings.upload_part_size_multiply_parts_count_threshold = get_uint_for_key(key, "upload_part_size_multiply_parts_count_threshold", true, settings.s3_upload_part_size_multiply_parts_count_threshold); rw_settings.max_single_part_upload_size = get_uint_for_key(key, "max_single_part_upload_size", true, settings.s3_max_single_part_upload_size); rw_settings.max_connections = get_uint_for_key(key, "max_connections", true, settings.s3_max_connections); + rw_settings.check_objects_after_upload = get_bool_for_key(key, "check_objects_after_upload", true, false); s3_settings.emplace(endpoint, S3Settings{std::move(auth_settings), std::move(rw_settings)}); } @@ -112,6 +120,7 @@ S3Settings::ReadWriteSettings::ReadWriteSettings(const Settings & settings) upload_part_size_multiply_parts_count_threshold = settings.s3_upload_part_size_multiply_parts_count_threshold; max_single_part_upload_size = settings.s3_max_single_part_upload_size; max_connections = settings.s3_max_connections; + check_objects_after_upload = settings.s3_check_objects_after_upload; } void S3Settings::ReadWriteSettings::updateFromSettingsIfEmpty(const Settings & settings) @@ -128,6 +137,7 @@ void S3Settings::ReadWriteSettings::updateFromSettingsIfEmpty(const Settings & s max_single_part_upload_size = settings.s3_max_single_part_upload_size; if (!max_connections) max_connections = settings.s3_max_connections; + check_objects_after_upload = settings.s3_check_objects_after_upload; } } diff --git a/src/Storages/StorageS3Settings.h b/src/Storages/StorageS3Settings.h index 21195fad215..9ef51c77191 100644 --- a/src/Storages/StorageS3Settings.h +++ b/src/Storages/StorageS3Settings.h @@ -60,6 +60,7 @@ struct S3Settings size_t upload_part_size_multiply_parts_count_threshold = 0; size_t max_single_part_upload_size = 0; size_t max_connections = 0; + bool check_objects_after_upload = false; ReadWriteSettings() = default; explicit ReadWriteSettings(const Settings & settings); @@ -71,7 +72,8 @@ struct S3Settings && upload_part_size_multiply_factor == other.upload_part_size_multiply_factor && upload_part_size_multiply_parts_count_threshold == other.upload_part_size_multiply_parts_count_threshold && max_single_part_upload_size == other.max_single_part_upload_size - && max_connections == other.max_connections; + && max_connections == other.max_connections + && check_objects_after_upload == other.check_objects_after_upload; } void updateFromSettingsIfEmpty(const Settings & settings); diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 8b115f5824e..c3da392b9cd 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -71,7 +71,7 @@ IStorageURLBase::IStorageURLBase( ASTPtr partition_by_) : IStorage(table_id_) , uri(uri_) - , compression_method(compression_method_) + , compression_method(chooseCompressionMethod(Poco::URI(uri_).getPath(), compression_method_)) , format_name(format_name_) , format_settings(format_settings_) , headers(headers_) @@ -164,7 +164,7 @@ namespace const ColumnsDescription & columns, UInt64 max_block_size, const ConnectionTimeouts & timeouts, - const String & compression_method, + CompressionMethod compression_method, size_t download_threads, const ReadWriteBufferFromHTTP::HTTPHeaderEntries & headers_ = {}, const URIParams & params = {}, @@ -245,7 +245,7 @@ namespace const String & http_method, std::function callback, const ConnectionTimeouts & timeouts, - const String & 
compression_method, + CompressionMethod compression_method, Poco::Net::HTTPBasicCredentials & credentials, const ReadWriteBufferFromHTTP::HTTPHeaderEntries & headers, bool glob_url, @@ -354,7 +354,7 @@ namespace std::move(read_buffer_factory), threadPoolCallbackRunner(IOThreadPool::get()), download_threads), - chooseCompressionMethod(request_uri.getPath(), compression_method), + compression_method, settings.zstd_window_log_max); } } @@ -386,7 +386,7 @@ namespace delay_initialization, /* use_external_buffer */ false, /* skip_url_not_found_error */ skip_url_not_found_error), - chooseCompressionMethod(request_uri.getPath(), compression_method), + compression_method, settings.zstd_window_log_max); } catch (...) @@ -566,7 +566,7 @@ std::function IStorageURLBase::getReadPOSTDataCallback( ColumnsDescription IStorageURLBase::getTableStructureFromData( const String & format, const String & uri, - const String & compression_method, + CompressionMethod compression_method, const ReadWriteBufferFromHTTP::HTTPHeaderEntries & headers, const std::optional & format_settings, ContextPtr context) @@ -791,7 +791,7 @@ SinkToStoragePtr IStorageURLBase::write(const ASTPtr & query, const StorageMetad metadata_snapshot->getSampleBlock(), context, ConnectionTimeouts::getHTTPTimeouts(context), - chooseCompressionMethod(uri, compression_method), + compression_method, http_method); } else @@ -803,7 +803,7 @@ SinkToStoragePtr IStorageURLBase::write(const ASTPtr & query, const StorageMetad metadata_snapshot->getSampleBlock(), context, ConnectionTimeouts::getHTTPTimeouts(context), - chooseCompressionMethod(uri, compression_method), + compression_method, http_method); } } @@ -836,7 +836,7 @@ std::optional IStorageURLBase::tryGetColumnsFromCache( return last_mod_time; }; - String cache_key = getKeyForSchemaCache(url, format_name, format_settings, context); + auto cache_key = getKeyForSchemaCache(url, format_name, format_settings, context); auto columns = schema_cache.tryGet(cache_key, get_last_mod_time); if (columns) return columns; @@ -853,7 +853,7 @@ void IStorageURLBase::addColumnsToCache( const ContextPtr & context) { auto & schema_cache = getSchemaCache(context); - Strings cache_keys = getKeysForSchemaCache(urls, format_name, format_settings, context); + auto cache_keys = getKeysForSchemaCache(urls, format_name, format_settings, context); schema_cache.addMany(cache_keys, columns); } @@ -1078,7 +1078,7 @@ URLBasedDataSourceConfiguration StorageURL::getConfiguration(ASTs & args, Contex } if (configuration.format == "auto") - configuration.format = FormatFactory::instance().getFormatFromFileName(configuration.url, true); + configuration.format = FormatFactory::instance().getFormatFromFileName(Poco::URI(configuration.url).getPath(), true); return configuration; } diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index fb0d8e8fa43..63c803f2d26 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -44,7 +44,7 @@ public: static ColumnsDescription getTableStructureFromData( const String & format, const String & uri, - const String & compression_method, + CompressionMethod compression_method, const ReadWriteBufferFromHTTP::HTTPHeaderEntries & headers, const std::optional & format_settings, ContextPtr context); @@ -67,7 +67,7 @@ protected: ASTPtr partition_by = nullptr); String uri; - String compression_method; + CompressionMethod compression_method; String format_name; // For URL engine, we use format settings from server context + `SETTINGS` // clause of the `CREATE` query. 
In this case, format_settings is set. diff --git a/src/Storages/StorageXDBC.cpp b/src/Storages/StorageXDBC.cpp index 0b7a1ae75d4..aacbb5fa302 100644 --- a/src/Storages/StorageXDBC.cpp +++ b/src/Storages/StorageXDBC.cpp @@ -27,11 +27,11 @@ StorageXDBC::StorageXDBC( const StorageID & table_id_, const std::string & remote_database_name_, const std::string & remote_table_name_, - const ColumnsDescription & columns_, + ColumnsDescription columns_, + ConstraintsDescription constraints_, const String & comment, ContextPtr context_, const BridgeHelperPtr bridge_helper_) - /// Please add support for constraints as soon as StorageODBC or JDBC will support insertion. : IStorageURLBase( "", context_, @@ -39,7 +39,7 @@ StorageXDBC::StorageXDBC( IXDBCBridgeHelper::DEFAULT_FORMAT, getFormatSettings(context_), columns_, - ConstraintsDescription{}, + constraints_, comment, "" /* CompressionMethod */) , bridge_helper(bridge_helper_) @@ -137,7 +137,7 @@ SinkToStoragePtr StorageXDBC::write(const ASTPtr & /* query */, const StorageMet metadata_snapshot->getSampleBlock(), local_context, ConnectionTimeouts::getHTTPTimeouts(local_context), - chooseCompressionMethod(uri, compression_method)); + compression_method); } bool StorageXDBC::supportsSubsetOfColumns() const @@ -179,6 +179,7 @@ namespace checkAndGetLiteralArgument(engine_args[1], "database_name"), checkAndGetLiteralArgument(engine_args[2], "table_name"), args.columns, + args.constraints, args.comment, args.getContext(), bridge_helper); diff --git a/src/Storages/StorageXDBC.h b/src/Storages/StorageXDBC.h index 442db5277e0..a2bb9c15baf 100644 --- a/src/Storages/StorageXDBC.h +++ b/src/Storages/StorageXDBC.h @@ -32,7 +32,8 @@ public: const StorageID & table_id_, const std::string & remote_database_name, const std::string & remote_table_name, - const ColumnsDescription & columns_, + ColumnsDescription columns_, + ConstraintsDescription constraints_, const String & comment, ContextPtr context_, BridgeHelperPtr bridge_helper_); diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index d86a0d4f5df..a5d90655155 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -7,6 +7,7 @@ const char * auto_contributors[] { "243f6a88 85a308d3", "243f6a8885a308d313198a2e037", "3ldar-nasyrov", + "546", "7", "821008736@qq.com", "ANDREI STAROVEROV", @@ -15,6 +16,7 @@ const char * auto_contributors[] { "Ahmed Dardery", "Aimiyoo", "Akazz", + "AlPerevyshin", "Alain BERRIER", "Albert Kidrachev", "Alberto", @@ -215,6 +217,7 @@ const char * auto_contributors[] { "DF5HSE", "DIAOZHAFENG", "Dale McDiarmid", + "Dale Mcdiarmid", "Dan Roscigno", "DanRoscigno", "Daniel Bershatsky", @@ -261,6 +264,7 @@ const char * auto_contributors[] { "Dongdong Yang", "DoomzD", "Dr. 
Strange Looker", + "Duc Canh Le", "DuckSoft", "Egor O'Sten", "Egor Savin", @@ -293,6 +297,7 @@ const char * auto_contributors[] { "Fabiano Francesconi", "Fadi Hadzh", "Fan()", + "Fangyuan Deng", "FawnD2", "Federico Ceratto", "Federico Rodriguez", @@ -349,6 +354,7 @@ const char * auto_contributors[] { "HuFuwang", "Hui Wang", "ILya Limarenko", + "Ignat Loskutov", "Igor", "Igor Hatarist", "Igor Mineev", @@ -412,6 +418,7 @@ const char * auto_contributors[] { "John Skopis", "Jonatas Freitas", "Jordi Villar", + "Josh Taylor", "João Figueiredo", "Julian Gilyadov", "Julian Zhou", @@ -461,10 +468,12 @@ const char * auto_contributors[] { "Leopold Schabel", "Lev Borodin", "Lewinma", + "Li Yin", "Liu Cong", "LiuCong", "LiuYangkuan", "Lopatin Konstantin", + "Lorenzo Mangani", "Loud_Scream", "Lucid Dreams", "Luis Bosque", @@ -477,6 +486,7 @@ const char * auto_contributors[] { "Maksim", "Maksim Fedotov", "Maksim Kita", + "Mallik Hassan", "Malte", "Marat IDRISOV", "Marcelo Rodriguez", @@ -556,6 +566,7 @@ const char * auto_contributors[] { "Mikhail f. Shiryaev", "MikuSugar", "Milad Arabi", + "Mingliang Pan", "Misko Lee", "Mohamad Fadhil", "Mohammad Hossein Sekhavat", @@ -605,6 +616,7 @@ const char * auto_contributors[] { "Nikolay Vasiliev", "Nikolay Volosatov", "Nir Peled", + "Nityananda Gohain", "Niu Zhaojie", "Odin Hultgren Van Der Horst", "Okada Haruki", @@ -738,7 +750,9 @@ const char * auto_contributors[] { "Simon Podlipsky", "Sina", "Sjoerd Mulder", + "SkyhotQin", "Slach", + "Smita Kulkarni", "Snow", "Sofia Antipushina", "Stanislav Pavlovichev", @@ -747,6 +761,7 @@ const char * auto_contributors[] { "Stefan Thies", "Stepan", "Stepan Herold", + "Stephan", "Steve-金勇", "Stig Bakken", "Storozhuk Kostiantyn", @@ -763,6 +778,7 @@ const char * auto_contributors[] { "Tai White", "Taleh Zaliyev", "Tangaev", + "Tanya Bragin", "Tatiana", "Tatiana Kirillova", "Teja", @@ -910,6 +926,8 @@ const char * auto_contributors[] { "alesapin", "alex-zaitsev", "alex.lvxin", + "alexX512", + "alexander goryanets", "alexander kozhikhov", "alexey-milovidov", "alexeypavlenko", @@ -968,6 +986,7 @@ const char * auto_contributors[] { "chertus", "chou.fan", "christophe.kalenzaga", + "clickhouse-robot-curie", "cms", "cmsxbc", "cn-ds", @@ -1197,6 +1216,7 @@ const char * auto_contributors[] { "mwish", "myrrc", "nagorny", + "nathanbegbie", "nauta", "nautaa", "ndchikin", @@ -1233,6 +1253,7 @@ const char * auto_contributors[] { "proller", "pufit", "pyos", + "pzhdfy", "qianlixiang", "qianmoQ", "qieqieplus", @@ -1242,6 +1263,7 @@ const char * auto_contributors[] { "r1j1k", "rainbowsysu", "redclusive", + "renwujie", "rfraposa", "ritaank", "rnbondarenko", @@ -1319,11 +1341,13 @@ const char * auto_contributors[] { "vitstn", "vivarum", "vladimir golovchenko", + "vsrsvas", "vxider", "vzakaznikov", "wangchao", "wangdh15", "weeds085490", + "whysage", "wuxiaobai24", "wzl", "xPoSx", diff --git a/src/Storages/System/StorageSystemDisks.cpp b/src/Storages/System/StorageSystemDisks.cpp index 86238ab8df1..ef2c695d6b7 100644 --- a/src/Storages/System/StorageSystemDisks.cpp +++ b/src/Storages/System/StorageSystemDisks.cpp @@ -23,6 +23,7 @@ StorageSystemDisks::StorageSystemDisks(const StorageID & table_id_) {"total_space", std::make_shared()}, {"keep_free_space", std::make_shared()}, {"type", std::make_shared()}, + {"is_encrypted", std::make_shared()}, {"cache_path", std::make_shared()}, })); setInMemoryMetadata(storage_metadata); @@ -45,6 +46,7 @@ Pipe StorageSystemDisks::read( MutableColumnPtr col_total = ColumnUInt64::create(); MutableColumnPtr col_keep = 
ColumnUInt64::create(); MutableColumnPtr col_type = ColumnString::create(); + MutableColumnPtr col_is_encrypted = ColumnUInt8::create(); MutableColumnPtr col_cache_path = ColumnString::create(); for (const auto & [disk_name, disk_ptr] : context->getDisksMap()) @@ -54,7 +56,9 @@ Pipe StorageSystemDisks::read( col_free->insert(disk_ptr->getAvailableSpace()); col_total->insert(disk_ptr->getTotalSpace()); col_keep->insert(disk_ptr->getKeepingFreeSpace()); - col_type->insert(toString(disk_ptr->getType())); + auto data_source_description = disk_ptr->getDataSourceDescription(); + col_type->insert(toString(data_source_description.type)); + col_is_encrypted->insert(data_source_description.is_encrypted); String cache_path; if (disk_ptr->supportsCache()) @@ -70,6 +74,7 @@ Pipe StorageSystemDisks::read( res_columns.emplace_back(std::move(col_total)); res_columns.emplace_back(std::move(col_keep)); res_columns.emplace_back(std::move(col_type)); + res_columns.emplace_back(std::move(col_is_encrypted)); res_columns.emplace_back(std::move(col_cache_path)); UInt64 num_rows = res_columns.at(0)->size(); diff --git a/src/Storages/System/StorageSystemFunctions.cpp b/src/Storages/System/StorageSystemFunctions.cpp index e2bc699d3f1..db6b51cb4f1 100644 --- a/src/Storages/System/StorageSystemFunctions.cpp +++ b/src/Storages/System/StorageSystemFunctions.cpp @@ -38,7 +38,13 @@ namespace ErrorCodes namespace { template - void fillRow(MutableColumns & res_columns, const String & name, UInt64 is_aggregate, const String & create_query, FunctionOrigin function_origin, const Factory & f) + void fillRow( + MutableColumns & res_columns, + const String & name, + UInt64 is_aggregate, + const String & create_query, + FunctionOrigin function_origin, + const Factory & factory) { res_columns[0]->insert(name); res_columns[1]->insert(is_aggregate); @@ -50,15 +56,25 @@ namespace } else { - res_columns[2]->insert(f.isCaseInsensitive(name)); - if (f.isAlias(name)) - res_columns[3]->insert(f.aliasTo(name)); + res_columns[2]->insert(factory.isCaseInsensitive(name)); + if (factory.isAlias(name)) + res_columns[3]->insert(factory.aliasTo(name)); else res_columns[3]->insertDefault(); } res_columns[4]->insert(create_query); res_columns[5]->insert(static_cast(function_origin)); + + if constexpr (std::is_same_v) + { + if (factory.isAlias(name)) + res_columns[6]->insertDefault(); + else + res_columns[6]->insert(factory.getDocumentation(name).description); + } + else + res_columns[6]->insertDefault(); } } @@ -79,7 +95,8 @@ NamesAndTypesList StorageSystemFunctions::getNamesAndTypes() {"case_insensitive", std::make_shared()}, {"alias_to", std::make_shared()}, {"create_query", std::make_shared()}, - {"origin", std::make_shared(getOriginEnumsAndValues())} + {"origin", std::make_shared(getOriginEnumsAndValues())}, + {"description", std::make_shared()}, }; } diff --git a/src/Storages/System/StorageSystemSchemaInferenceCache.cpp b/src/Storages/System/StorageSystemSchemaInferenceCache.cpp index 1a32128c7e0..b11fc137d8c 100644 --- a/src/Storages/System/StorageSystemSchemaInferenceCache.cpp +++ b/src/Storages/System/StorageSystemSchemaInferenceCache.cpp @@ -44,16 +44,13 @@ NamesAndTypesList StorageSystemSchemaInferenceCache::getNamesAndTypes() static void fillDataImpl(MutableColumns & res_columns, SchemaCache & schema_cache, const String & storage_name) { auto s3_schema_cache_data = schema_cache.getAll(); - String source; - String format; - String additional_format_info; + for (const auto & [key, schema_info] : s3_schema_cache_data) { - 
splitSchemaCacheKey(key, source, format, additional_format_info); res_columns[0]->insert(storage_name); - res_columns[1]->insert(source); - res_columns[2]->insert(format); - res_columns[3]->insert(additional_format_info); + res_columns[1]->insert(key.source); + res_columns[2]->insert(key.format); + res_columns[3]->insert(key.additional_format_info); res_columns[4]->insert(schema_info.registration_time); res_columns[5]->insert(getSchemaString(schema_info.columns)); } diff --git a/src/Storages/System/StorageSystemTableFunctions.cpp b/src/Storages/System/StorageSystemTableFunctions.cpp index 2824e1726e9..308cbc5686d 100644 --- a/src/Storages/System/StorageSystemTableFunctions.cpp +++ b/src/Storages/System/StorageSystemTableFunctions.cpp @@ -6,15 +6,21 @@ namespace DB NamesAndTypesList StorageSystemTableFunctions::getNamesAndTypes() { - return {{"name", std::make_shared()}}; + return + { + {"name", std::make_shared()}, + {"description", std::make_shared()} + }; } void StorageSystemTableFunctions::fillData(MutableColumns & res_columns, ContextPtr, const SelectQueryInfo &) const { - const auto & functions_names = TableFunctionFactory::instance().getAllRegisteredNames(); + const auto & factory = TableFunctionFactory::instance(); + const auto & functions_names = factory.getAllRegisteredNames(); for (const auto & function_name : functions_names) { res_columns[0]->insert(function_name); + res_columns[1]->insert(factory.getDocumentation(function_name).description); } } diff --git a/src/Storages/System/StorageSystemTableFunctions.h b/src/Storages/System/StorageSystemTableFunctions.h index 04123e4b1b3..a74e2968731 100644 --- a/src/Storages/System/StorageSystemTableFunctions.h +++ b/src/Storages/System/StorageSystemTableFunctions.h @@ -20,7 +20,6 @@ public: } static NamesAndTypesList getNamesAndTypes(); - }; } diff --git a/src/Storages/tests/gtest_transform_query_for_external_database.cpp b/src/Storages/tests/gtest_transform_query_for_external_database.cpp index 3248f790bdb..131bc2b85e3 100644 --- a/src/Storages/tests/gtest_transform_query_for_external_database.cpp +++ b/src/Storages/tests/gtest_transform_query_for_external_database.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -64,6 +65,9 @@ private: {"create_time", std::make_shared()}, {"field", std::make_shared()}, {"value", std::make_shared()}, + {"a", std::make_shared()}, + {"b", std::make_shared()}, + {"foo", std::make_shared()}, }), TableWithColumnNamesAndTypes( createDBAndTable("table2"), @@ -269,3 +273,12 @@ TEST(TransformQueryForExternalDatabase, Null) "SELECT field FROM table WHERE isNotNull(field)", R"(SELECT "field" FROM "test"."table" WHERE "field" IS NOT NULL)"); } + +TEST(TransformQueryForExternalDatabase, ToDate) +{ + const State & state = State::instance(); + + check(state, 1, + "SELECT foo FROM table WHERE a=10 AND b=toDate('2019-10-05')", + R"(SELECT "a", "b", "foo" FROM "test"."table" WHERE ("a" = 10) AND ("b" = '2019-10-05'))"); +} diff --git a/src/TableFunctions/ITableFunctionXDBC.cpp b/src/TableFunctions/ITableFunctionXDBC.cpp index e9d33acc6de..3d72d98e7ea 100644 --- a/src/TableFunctions/ITableFunctionXDBC.cpp +++ b/src/TableFunctions/ITableFunctionXDBC.cpp @@ -1,9 +1,6 @@ -#include -#include - -#include #include #include +#include #include #include #include @@ -16,10 +13,9 @@ #include #include #include -#include -#include #include "registerTableFunctions.h" + namespace DB { namespace ErrorCodes @@ -33,13 +29,12 @@ void ITableFunctionXDBC::parseArguments(const ASTPtr & ast_function, 
ContextPtr const auto & args_func = ast_function->as(); if (!args_func.arguments) - throw Exception("Table function '" + getName() + "' must have arguments.", ErrorCodes::LOGICAL_ERROR); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Table function '{}' must have arguments.", getName()); ASTs & args = args_func.arguments->children; if (args.size() != 2 && args.size() != 3) - throw Exception("Table function '" + getName() + "' requires 2 or 3 arguments: " + getName() + "('DSN', table) or " + getName() - + "('DSN', schema, table)", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Table function '{0}' requires 2 or 3 arguments: {0}('DSN', table) or {0}('DSN', schema, table)", getName()); for (auto & arg : args) arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context); @@ -61,7 +56,6 @@ void ITableFunctionXDBC::startBridgeIfNot(ContextPtr context) const { if (!helper) { - /// Have to const_cast, because bridges store their commands inside context helper = createBridgeHelper(context, context->getSettingsRef().http_receive_timeout.value, connection_string); helper->startBridgeSync(); } @@ -71,16 +65,15 @@ ColumnsDescription ITableFunctionXDBC::getActualTableStructure(ContextPtr contex { startBridgeIfNot(context); - /* Infer external table structure */ + /// Infer external table structure. Poco::URI columns_info_uri = helper->getColumnsInfoURI(); columns_info_uri.addQueryParameter("connection_string", connection_string); if (!schema_name.empty()) columns_info_uri.addQueryParameter("schema", schema_name); columns_info_uri.addQueryParameter("table", remote_table_name); - const auto use_nulls = context->getSettingsRef().external_table_functions_use_nulls; - columns_info_uri.addQueryParameter("external_table_functions_use_nulls", - Poco::NumberFormatter::format(use_nulls)); + bool use_nulls = context->getSettingsRef().external_table_functions_use_nulls; + columns_info_uri.addQueryParameter("external_table_functions_use_nulls", toString(use_nulls)); Poco::Net::HTTPBasicCredentials credentials{}; ReadWriteBufferFromHTTP buf(columns_info_uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(context), credentials); @@ -97,7 +90,7 @@ StoragePtr ITableFunctionXDBC::executeImpl(const ASTPtr & /*ast_function*/, Cont startBridgeIfNot(context); auto columns = getActualTableStructure(context); auto result = std::make_shared( - StorageID(getDatabaseName(), table_name), schema_name, remote_table_name, columns, String{}, context, helper); + StorageID(getDatabaseName(), table_name), schema_name, remote_table_name, columns, ConstraintsDescription{}, String{}, context, helper); result->startup(); return result; } diff --git a/src/TableFunctions/TableFunctionFactory.cpp b/src/TableFunctions/TableFunctionFactory.cpp index 8c2245d4abe..5ed22e39300 100644 --- a/src/TableFunctions/TableFunctionFactory.cpp +++ b/src/TableFunctions/TableFunctionFactory.cpp @@ -17,14 +17,15 @@ namespace ErrorCodes } -void TableFunctionFactory::registerFunction(const std::string & name, Value creator, CaseSensitiveness case_sensitiveness) +void TableFunctionFactory::registerFunction( + const std::string & name, TableFunctionCreator creator, Documentation doc, CaseSensitiveness case_sensitiveness) { - if (!table_functions.emplace(name, creator).second) + if (!table_functions.emplace(name, TableFunctionFactoryData{creator, doc}).second) throw Exception("TableFunctionFactory: the table function name '" + name + "' is not unique", 
ErrorCodes::LOGICAL_ERROR); if (case_sensitiveness == CaseInsensitive - && !case_insensitive_table_functions.emplace(Poco::toLower(name), creator).second) + && !case_insensitive_table_functions.emplace(Poco::toLower(name), TableFunctionFactoryData{creator, doc}).second) throw Exception("TableFunctionFactory: the case insensitive table function name '" + name + "' is not unique", ErrorCodes::LOGICAL_ERROR); } @@ -57,12 +58,14 @@ TableFunctionPtr TableFunctionFactory::tryGet( auto it = table_functions.find(name); if (table_functions.end() != it) - res = it->second(); + { + res = it->second.first(); + } else { it = case_insensitive_table_functions.find(Poco::toLower(name)); if (case_insensitive_table_functions.end() != it) - res = it->second(); + res = it->second.first(); } if (!res) @@ -83,6 +86,15 @@ bool TableFunctionFactory::isTableFunctionName(const std::string & name) const return table_functions.contains(name); } +Documentation TableFunctionFactory::getDocumentation(const std::string & name) const +{ + auto it = table_functions.find(name); + if (it == table_functions.end()) + throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "Unknown table function {}", name); + + return it->second.second; +} + TableFunctionFactory & TableFunctionFactory::instance() { static TableFunctionFactory ret; diff --git a/src/TableFunctions/TableFunctionFactory.h b/src/TableFunctions/TableFunctionFactory.h index 59b4ffb9fd5..8ff352ff9ac 100644 --- a/src/TableFunctions/TableFunctionFactory.h +++ b/src/TableFunctions/TableFunctionFactory.h @@ -3,7 +3,7 @@ #include #include #include - +#include #include #include @@ -18,26 +18,28 @@ namespace DB class Context; using TableFunctionCreator = std::function; +using TableFunctionFactoryData = std::pair; /** Lets you get a table function by its name. */ -class TableFunctionFactory final: private boost::noncopyable, public IFactoryWithAliases +class TableFunctionFactory final: private boost::noncopyable, public IFactoryWithAliases { public: static TableFunctionFactory & instance(); /// Register a function by its name. /// No locking, you must register all functions before usage of get. - void registerFunction(const std::string & name, Value creator, CaseSensitiveness case_sensitiveness = CaseSensitive); + void registerFunction( + const std::string & name, + TableFunctionCreator creator, + Documentation doc = {}, + CaseSensitiveness case_sensitiveness = CaseSensitive); template - void registerFunction(CaseSensitiveness case_sensitiveness = CaseSensitive) + void registerFunction(Documentation doc = {}, CaseSensitiveness case_sensitiveness = CaseSensitive) { - auto creator = [] () -> TableFunctionPtr - { - return std::make_shared(); - }; - registerFunction(Function::name, std::move(creator), case_sensitiveness); + auto creator = []() -> TableFunctionPtr { return std::make_shared(); }; + registerFunction(Function::name, std::move(creator), std::move(doc), case_sensitiveness); } /// Throws an exception if not found. @@ -46,6 +48,8 @@ public: /// Returns nullptr if not found. 
TableFunctionPtr tryGet(const std::string & name, ContextPtr context) const; + Documentation getDocumentation(const std::string & name) const; + bool isTableFunctionName(const std::string & name) const; private: diff --git a/src/TableFunctions/TableFunctionFormat.cpp b/src/TableFunctions/TableFunctionFormat.cpp index 0aac91d6095..9f239adb538 100644 --- a/src/TableFunctions/TableFunctionFormat.cpp +++ b/src/TableFunctions/TableFunctionFormat.cpp @@ -91,7 +91,7 @@ StoragePtr TableFunctionFormat::executeImpl(const ASTPtr & /*ast_function*/, Con void registerTableFunctionFormat(TableFunctionFactory & factory) { - factory.registerFunction(TableFunctionFactory::CaseInsensitive); + factory.registerFunction({}, TableFunctionFactory::CaseInsensitive); } } diff --git a/src/TableFunctions/TableFunctionMeiliSearch.h b/src/TableFunctions/TableFunctionMeiliSearch.h index 9634322d0e5..86be944ab12 100644 --- a/src/TableFunctions/TableFunctionMeiliSearch.h +++ b/src/TableFunctions/TableFunctionMeiliSearch.h @@ -4,17 +4,18 @@ namespace DB { + class TableFunctionMeiliSearch : public ITableFunction { public: - static constexpr auto name = "MeiliSearch"; + static constexpr auto name = "meilisearch"; String getName() const override { return name; } private: StoragePtr executeImpl( const ASTPtr & ast_function, ContextPtr context, const String & table_name, ColumnsDescription cached_columns) const override; - const char * getStorageTypeName() const override { return "MeiliSearch"; } + const char * getStorageTypeName() const override { return "meilisearch"; } ColumnsDescription getActualTableStructure(ContextPtr context) const override; void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; diff --git a/src/TableFunctions/TableFunctionURL.cpp b/src/TableFunctions/TableFunctionURL.cpp index f4742792573..bbae0990062 100644 --- a/src/TableFunctions/TableFunctionURL.cpp +++ b/src/TableFunctions/TableFunctionURL.cpp @@ -54,7 +54,7 @@ void TableFunctionURL::parseArguments(const ASTPtr & ast_function, ContextPtr co filename = configuration.url; format = configuration.format; if (format == "auto") - format = FormatFactory::instance().getFormatFromFileName(filename, true); + format = FormatFactory::instance().getFormatFromFileName(Poco::URI(filename).getPath(), true); structure = configuration.structure; compression_method = configuration.compression_method; } @@ -113,11 +113,21 @@ ReadWriteBufferFromHTTP::HTTPHeaderEntries TableFunctionURL::getHeaders() const ColumnsDescription TableFunctionURL::getActualTableStructure(ContextPtr context) const { if (structure == "auto") - return StorageURL::getTableStructureFromData(format, filename, compression_method, getHeaders(), std::nullopt, context); + return StorageURL::getTableStructureFromData(format, + filename, + chooseCompressionMethod(Poco::URI(filename).getPath(), compression_method), + getHeaders(), + std::nullopt, + context); return parseColumnsListFromString(structure, context); } +String TableFunctionURL::getFormatFromFirstArgument() +{ + return FormatFactory::instance().getFormatFromFileName(Poco::URI(filename).getPath(), true); +} + void registerTableFunctionURL(TableFunctionFactory & factory) { factory.registerFunction(); diff --git a/src/TableFunctions/TableFunctionURL.h b/src/TableFunctions/TableFunctionURL.h index 35483b1a04a..9fad42c8d2d 100644 --- a/src/TableFunctions/TableFunctionURL.h +++ b/src/TableFunctions/TableFunctionURL.h @@ -32,6 +32,8 @@ private: const std::string & table_name, const String & compression_method_) const 
override; const char * getStorageTypeName() const override { return "URL"; } + String getFormatFromFirstArgument() override; + ReadWriteBufferFromHTTP::HTTPHeaderEntries getHeaders() const; URLBasedDataSourceConfiguration configuration; diff --git a/src/TableFunctions/TableFunctionValues.cpp b/src/TableFunctions/TableFunctionValues.cpp index 8d52cff4879..7b8de69a4f8 100644 --- a/src/TableFunctions/TableFunctionValues.cpp +++ b/src/TableFunctions/TableFunctionValues.cpp @@ -147,7 +147,7 @@ StoragePtr TableFunctionValues::executeImpl(const ASTPtr & ast_function, Context void registerTableFunctionValues(TableFunctionFactory & factory) { - factory.registerFunction<TableFunctionValues>(TableFunctionFactory::CaseInsensitive); + factory.registerFunction<TableFunctionValues>({}, TableFunctionFactory::CaseInsensitive); } } diff --git a/src/TableFunctions/TableFunctionZeros.cpp b/src/TableFunctions/TableFunctionZeros.cpp index 3baa09a65ea..126166b547b 100644 --- a/src/TableFunctions/TableFunctionZeros.cpp +++ b/src/TableFunctions/TableFunctionZeros.cpp @@ -48,8 +48,37 @@ StoragePtr TableFunctionZeros<multithreaded>::executeImpl(const ASTPtr & ast_fun void registerTableFunctionZeros(TableFunctionFactory & factory) { - factory.registerFunction<TableFunctionZeros<false>>(); - factory.registerFunction<TableFunctionZeros<true>>(); + factory.registerFunction<TableFunctionZeros<false>>({R"( +Generates a stream of zeros (a table with one column 'zero' of type 'UInt8') of the specified size. + +This table function is used in performance tests, where you want to spend as little time as possible on data generation while testing other parts of a query. + +In contrast to `zeros_mt`, this table function uses a single thread for data generation. + +Example: +[example:1] +This query tests the speed of the `randomPrintableASCII` function using a single thread. + +See also the `system.zeros` table. +)", +{{"1", "SELECT count() FROM zeros(100000000) WHERE NOT ignore(randomPrintableASCII(10))"}} +}); + + factory.registerFunction<TableFunctionZeros<true>>({R"( +Generates a stream of zeros (a table with one column 'zero' of type 'UInt8') of the specified size. + +This table function is used in performance tests, where you want to spend as little time as possible on data generation while testing other parts of a query. + +In contrast to `zeros`, this table function uses multiple threads for data generation, according to the `max_threads` setting. + +Example: +[example:1] +This query tests the speed of the `randomPrintableASCII` function using multiple threads. + +See also the `system.zeros` table. 
+)", +{{"1", "SELECT count() FROM zeros_mt(1000000000) WHERE NOT ignore(randomPrintableASCII(10))"}} +}); } template diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py index 610877a5508..f58c7a74dfe 100644 --- a/tests/ci/build_check.py +++ b/tests/ci/build_check.py @@ -38,8 +38,6 @@ BUILD_LOG_NAME = "build_log.log" def _can_export_binaries(build_config: BuildConfig) -> bool: if build_config["package_type"] != "deb": return False - if build_config["bundled"] != "bundled": - return False if build_config["libraries"] == "shared": return False if build_config["sanitizer"] != "": @@ -60,8 +58,9 @@ def get_packager_cmd( ) -> str: package_type = build_config["package_type"] comp = build_config["compiler"] + cmake_flags = "-DENABLE_CLICKHOUSE_SELF_EXTRACTING=1" cmd = ( - f"cd {packager_path} && ./packager --output-dir={output_path} " + f"cd {packager_path} && CMAKE_FLAGS='{cmake_flags}' ./packager --output-dir={output_path} " f"--package-type={package_type} --compiler={comp}" ) diff --git a/tests/ci/build_report_check.py b/tests/ci/build_report_check.py index 7ebebb881bc..673b0204864 100644 --- a/tests/ci/build_report_check.py +++ b/tests/ci/build_report_check.py @@ -37,7 +37,6 @@ class BuildResult: compiler, build_type, sanitizer, - bundled, libraries, status, elapsed_seconds, @@ -46,7 +45,6 @@ class BuildResult: self.compiler = compiler self.build_type = build_type self.sanitizer = sanitizer - self.bundled = bundled self.libraries = libraries self.status = status self.elapsed_seconds = elapsed_seconds @@ -91,7 +89,6 @@ def get_failed_report( compiler="unknown", build_type="unknown", sanitizer="unknown", - bundled="unknown", libraries="unknown", status=message, elapsed_seconds=0, @@ -108,7 +105,6 @@ def process_report( compiler=build_config["compiler"], build_type=build_config["build_type"], sanitizer=build_config["sanitizer"], - bundled=build_config["bundled"], libraries=build_config["libraries"], status="success" if build_report["status"] else "failure", elapsed_seconds=build_report["elapsed_seconds"], diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 4b2cf4df743..61dfb07f762 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -13,7 +13,6 @@ CI_CONFIG = { "sanitizer": "", "package_type": "deb", "static_binary_name": "amd64", - "bundled": "bundled", "libraries": "static", "additional_pkgs": True, "tidy": "disable", @@ -24,7 +23,6 @@ CI_CONFIG = { "build_type": "", "sanitizer": "", "package_type": "coverity", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -36,7 +34,6 @@ CI_CONFIG = { "sanitizer": "", "package_type": "deb", "static_binary_name": "aarch64", - "bundled": "bundled", "libraries": "static", "additional_pkgs": True, "tidy": "disable", @@ -47,7 +44,6 @@ CI_CONFIG = { "build_type": "", "sanitizer": "address", "package_type": "deb", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -57,7 +53,6 @@ CI_CONFIG = { "build_type": "", "sanitizer": "undefined", "package_type": "deb", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -67,7 +62,6 @@ CI_CONFIG = { "build_type": "", "sanitizer": "thread", "package_type": "deb", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -77,7 +71,6 @@ CI_CONFIG = { "build_type": "", "sanitizer": "memory", "package_type": "deb", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -87,7 +80,6 @@ CI_CONFIG = { 
"build_type": "debug", "sanitizer": "", "package_type": "deb", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -97,7 +89,6 @@ CI_CONFIG = { "build_type": "", "sanitizer": "", "package_type": "binary", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -108,7 +99,6 @@ CI_CONFIG = { "sanitizer": "", "package_type": "binary", "static_binary_name": "debug-amd64", - "bundled": "bundled", "libraries": "static", "tidy": "enable", "with_coverage": False, @@ -118,7 +108,6 @@ CI_CONFIG = { "build_type": "", "sanitizer": "", "package_type": "binary", - "bundled": "bundled", "libraries": "shared", "tidy": "disable", "with_coverage": False, @@ -129,7 +118,6 @@ CI_CONFIG = { "sanitizer": "", "package_type": "binary", "static_binary_name": "macos", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -139,7 +127,6 @@ CI_CONFIG = { "build_type": "", "sanitizer": "", "package_type": "binary", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -150,7 +137,6 @@ CI_CONFIG = { "sanitizer": "", "package_type": "binary", "static_binary_name": "freebsd", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -161,7 +147,6 @@ CI_CONFIG = { "sanitizer": "", "package_type": "binary", "static_binary_name": "macos-aarch64", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, @@ -172,7 +157,6 @@ CI_CONFIG = { "sanitizer": "", "package_type": "binary", "static_binary_name": "powerpc64le", - "bundled": "bundled", "libraries": "static", "tidy": "disable", "with_coverage": False, diff --git a/tests/ci/performance_comparison_check.py b/tests/ci/performance_comparison_check.py index 40befc78de2..78cf9fad001 100644 --- a/tests/ci/performance_comparison_check.py +++ b/tests/ci/performance_comparison_check.py @@ -141,11 +141,19 @@ if __name__ == "__main__": if not os.path.exists(result_path): os.makedirs(result_path) - docker_env += ( - " -e CLICKHOUSE_PERFORMANCE_COMPARISON_DATABASE_URL" - " -e CLICKHOUSE_PERFORMANCE_COMPARISON_DATABASE_USER" - " -e CLICKHOUSE_PERFORMANCE_COMPARISON_DATABASE_USER_PASSWORD" - ) + database_url = get_parameter_from_ssm("clickhouse-test-stat-url") + database_username = get_parameter_from_ssm("clickhouse-test-stat-login") + database_password = get_parameter_from_ssm("clickhouse-test-stat-password") + + env_extra = { + "CLICKHOUSE_PERFORMANCE_COMPARISON_DATABASE_URL": f"{database_url}:9440", + "CLICKHOUSE_PERFORMANCE_COMPARISON_DATABASE_USER": database_username, + "CLICKHOUSE_PERFORMANCE_COMPARISON_DATABASE_USER_PASSWORD": database_password, + "CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME": check_name_with_group, + "CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME_PREFIX": check_name_prefix, + } + + docker_env += "".join([f" -e {name}" for name in env_extra]) run_command = get_run_command( result_path, @@ -158,23 +166,10 @@ if __name__ == "__main__": ) logging.info("Going to run command %s", run_command) - popen_env = os.environ.copy() - - database_url = get_parameter_from_ssm("clickhouse-test-stat-url") - database_username = get_parameter_from_ssm("clickhouse-test-stat-login") - database_password = get_parameter_from_ssm("clickhouse-test-stat-password") - - popen_env.update( - { - "CLICKHOUSE_PERFORMANCE_COMPARISON_DATABASE_URL": f"{database_url}:9440", - "CLICKHOUSE_PERFORMANCE_COMPARISON_DATABASE_USER": database_username, - 
"CLICKHOUSE_PERFORMANCE_COMPARISON_DATABASE_USER_PASSWORD": database_password, - "CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME": check_name_with_group, - "CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME_PREFIX": check_name_prefix, - } - ) - run_log_path = os.path.join(temp_path, "runlog.log") + + popen_env = os.environ.copy() + popen_env.update(env_extra) with TeePopen(run_command, run_log_path, env=popen_env) as process: retcode = process.wait() if retcode == 0: diff --git a/tests/ci/report.py b/tests/ci/report.py index 7d84185b863..a6700f50dfc 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -283,7 +283,6 @@ tr:hover td {{filter: brightness(95%);}} Compiler Build type Sanitizer -Bundled Libraries Status Build log @@ -328,7 +327,6 @@ def create_build_html_report( else: row += "{}".format("none") - row += "{}".format(build_result.bundled) row += "{}".format(build_result.libraries) if build_result.status: diff --git a/tests/ci/stress_check.py b/tests/ci/stress_check.py index 497df013cf4..e644eef3bc8 100644 --- a/tests/ci/stress_check.py +++ b/tests/ci/stress_check.py @@ -34,7 +34,7 @@ def get_run_command( # a static link, don't use S3_URL or S3_DOWNLOAD "-e S3_URL='https://s3.amazonaws.com/clickhouse-datasets' " # For dmesg - "--cap-add syslog " + "--privileged " f"--volume={build_path}:/package_folder " f"--volume={result_folder}:/test_output " f"--volume={repo_tests_path}:/usr/share/clickhouse-test " diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 3d62746dc48..f40c93c6f5d 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -1714,6 +1714,75 @@ def removesuffix(text, *suffixes): return text +def reportCoverageFor(args, what, query, permissive = False): + value = clickhouse_execute(args, query).decode() + + if value != "": + print(f"\nThe following {what} were not covered by tests:\n") + print(value) + print("\n") + return permissive + + return True + + +def reportCoverage(args): + return reportCoverageFor( + args, + "functions", + """ + SELECT name + FROM system.functions + WHERE NOT is_aggregate AND origin = 'System' AND alias_to = '' + AND name NOT IN + ( + SELECT arrayJoin(used_functions) FROM system.query_log WHERE event_date >= yesterday() + ) + ORDER BY name + """, + True + ) and reportCoverageFor( + args, + "aggregate functions", + """ + SELECT name + FROM system.functions + WHERE is_aggregate AND origin = 'System' AND alias_to = '' + AND name NOT IN + ( + SELECT arrayJoin(used_aggregate_functions) FROM system.query_log WHERE event_date >= yesterday() + ) + ORDER BY name + """ + ) and reportCoverageFor( + args, + "aggregate function combinators", + """ + SELECT name + FROM system.aggregate_function_combinators + WHERE NOT is_internal + AND name NOT IN + ( + SELECT arrayJoin(used_aggregate_function_combinators) FROM system.query_log WHERE event_date >= yesterday() + ) + ORDER BY name + """ + ) and reportCoverageFor( + args, + "data type families", + """ + SELECT name + FROM system.data_type_families + WHERE alias_to = '' AND name NOT LIKE 'Interval%' + AND name NOT IN + ( + SELECT arrayJoin(used_data_type_families) FROM system.query_log WHERE event_date >= yesterday() + ) + ORDER BY name + """ + ) + + def main(args): global server_died global stop_time @@ -1845,6 +1914,9 @@ def main(args): else: print("All tests have finished.") + if args.report_coverage and not reportCoverage(args): + exit_code.value = 1 + sys.exit(exit_code.value) @@ -2125,6 +2197,12 @@ if __name__ == "__main__": type=int, help="Max number of failed tests in a row (stop tests 
if higher)", ) + parser.add_argument( + "--report-coverage", + action="store_true", + default=False, + help="Check what high-level server components were covered by tests", + ) args = parser.parse_args() if args.queries and not os.path.isdir(args.queries): diff --git a/tests/config/config.d/enable_zero_copy_replication.xml b/tests/config/config.d/enable_zero_copy_replication.xml new file mode 100644 index 00000000000..1233f2f7826 --- /dev/null +++ b/tests/config/config.d/enable_zero_copy_replication.xml @@ -0,0 +1,5 @@ + + + 1 + + diff --git a/tests/config/config.d/nlp.xml b/tests/config/config.d/nlp.xml new file mode 100644 index 00000000000..740054674ba --- /dev/null +++ b/tests/config/config.d/nlp.xml @@ -0,0 +1,22 @@ + + + + + en + plain + /etc/clickhouse-server/config.d/ext-en.txt + + + ru + plain + /etc/clickhouse-server/config.d/ext-ru.txt + + + + + + en + /etc/clickhouse-server/config.d/lem-en.bin + + + diff --git a/tests/integration/test_nlp/dictionaries/ext-en.txt b/tests/config/ext-en.txt similarity index 100% rename from tests/integration/test_nlp/dictionaries/ext-en.txt rename to tests/config/ext-en.txt diff --git a/tests/integration/test_nlp/dictionaries/ext-ru.txt b/tests/config/ext-ru.txt similarity index 100% rename from tests/integration/test_nlp/dictionaries/ext-ru.txt rename to tests/config/ext-ru.txt diff --git a/tests/config/install.sh b/tests/config/install.sh index 45af0fbcd07..e27675b8abb 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -47,6 +47,8 @@ ln -sf $SRC_PATH/config.d/named_collection.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/ssl_certs.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/filesystem_cache_log.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/session_log.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/enable_zero_copy_replication.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/nlp.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/users.d/log_queries.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/readonly.xml $DEST_SERVER_PATH/users.d/ @@ -58,6 +60,7 @@ ln -sf $SRC_PATH/users.d/session_log_test.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/memory_profiler.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/no_fsync_metadata.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/filelog.xml $DEST_SERVER_PATH/users.d/ +ln -sf $SRC_PATH/users.d/enable_blobs_check.xml $DEST_SERVER_PATH/users.d/ # FIXME DataPartsExchange may hang for http_send_timeout seconds # when nobody is going to read from the other side of socket (due to "Fetching of part was cancelled"), @@ -73,6 +76,10 @@ ln -sf $SRC_PATH/test_function.xml $DEST_SERVER_PATH/ ln -sf $SRC_PATH/top_level_domains $DEST_SERVER_PATH/ +ln -sf $SRC_PATH/ext-en.txt $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/ext-ru.txt $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/lem-en.bin $DEST_SERVER_PATH/config.d/ + ln -sf $SRC_PATH/server.key $DEST_SERVER_PATH/ ln -sf $SRC_PATH/server.crt $DEST_SERVER_PATH/ ln -sf $SRC_PATH/dhparam.pem $DEST_SERVER_PATH/ diff --git a/tests/integration/test_nlp/dictionaries/lem-en.bin b/tests/config/lem-en.bin similarity index 100% rename from tests/integration/test_nlp/dictionaries/lem-en.bin rename to tests/config/lem-en.bin diff --git a/tests/config/users.d/enable_blobs_check.xml b/tests/config/users.d/enable_blobs_check.xml new file mode 100644 index 00000000000..0877bfcfa9b --- /dev/null +++ b/tests/config/users.d/enable_blobs_check.xml @@ -0,0 +1,7 @@ + + + + 1 + + + diff 
--git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py index fc44bf5ee13..2fe3bb99e45 100644 --- a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -89,6 +89,8 @@ def test_restore_table(engine): assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" instance.query(f"BACKUP TABLE test.table TO {backup_name}") + assert instance.contains_in_log("using native copy") + instance.query("DROP TABLE test.table") assert instance.query("EXISTS test.table") == "0\n" @@ -129,6 +131,8 @@ def test_restore_table_under_another_name(): assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" instance.query(f"BACKUP TABLE test.table TO {backup_name}") + assert instance.contains_in_log("using native copy") + assert instance.query("EXISTS test.table2") == "0\n" instance.query(f"RESTORE TABLE test.table AS test.table2 FROM {backup_name}") @@ -142,6 +146,8 @@ def test_backup_table_under_another_name(): assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" instance.query(f"BACKUP TABLE test.table AS test.table2 TO {backup_name}") + assert instance.contains_in_log("using native copy") + assert instance.query("EXISTS test.table2") == "0\n" instance.query(f"RESTORE TABLE test.table2 FROM {backup_name}") @@ -170,6 +176,8 @@ def test_incremental_backup(): assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" instance.query(f"BACKUP TABLE test.table TO {backup_name}") + assert instance.contains_in_log("using native copy") + instance.query("INSERT INTO test.table VALUES (65, 'a'), (66, 'b')") assert instance.query("SELECT count(), sum(x) FROM test.table") == "102\t5081\n" @@ -244,6 +252,8 @@ def test_file_engine(): assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" instance.query(f"BACKUP TABLE test.table TO {backup_name}") + assert instance.contains_in_log("using native copy") + instance.query("DROP TABLE test.table") assert instance.query("EXISTS test.table") == "0\n" @@ -257,6 +267,9 @@ def test_database(): assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" instance.query(f"BACKUP DATABASE test TO {backup_name}") + + assert instance.contains_in_log("using native copy") + instance.query("DROP DATABASE test") instance.query(f"RESTORE DATABASE test FROM {backup_name}") @@ -269,6 +282,7 @@ def test_zip_archive(): assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" instance.query(f"BACKUP TABLE test.table TO {backup_name}") + assert os.path.isfile(get_path_to_backup(backup_name)) instance.query("DROP TABLE test.table") diff --git a/tests/integration/test_checking_s3_blobs_paranoid/__init__.py b/tests/integration/test_checking_s3_blobs_paranoid/__init__.py new file mode 100644 index 00000000000..e5a0d9b4834 --- /dev/null +++ b/tests/integration/test_checking_s3_blobs_paranoid/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/env python3 diff --git a/tests/integration/test_checking_s3_blobs_paranoid/configs/setting.xml b/tests/integration/test_checking_s3_blobs_paranoid/configs/setting.xml new file mode 100644 index 00000000000..f8749488f57 --- /dev/null +++ b/tests/integration/test_checking_s3_blobs_paranoid/configs/setting.xml @@ -0,0 +1,23 @@ + + + + + + 1 + 1 + + + + + + + + ::/0 + + default + default + + + + + diff --git a/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml 
b/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml new file mode 100644 index 00000000000..733205ce3e1 --- /dev/null +++ b/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml @@ -0,0 +1,31 @@
+<clickhouse>
+    <logger>
+        <level>test</level>
+    </logger>
+    <storage_configuration>
+        <disks>
+            <s3>
+                <type>s3</type>
+                <endpoint>http://minio1:9001/root/data/</endpoint>
+                <access_key_id>minio</access_key_id>
+                <secret_access_key>minio123</secret_access_key>
+            </s3>
+        </disks>
+        <policies>
+            <s3>
+                <volumes>
+                    <main>
+                        <disk>s3</disk>
+                    </main>
+                </volumes>
+            </s3>
+        </policies>
+    </storage_configuration>
+    <merge_tree>
+        <storage_policy>s3</storage_policy>
+    </merge_tree>
+</clickhouse>
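Taken together with the `check_objects_after_upload` plumbing added to `StorageS3Settings` earlier in this diff, this configuration enables the paranoid post-upload check for every blob written through the `s3` disk. A minimal SQL sketch of what the integration test below drives, assuming a server started with this storage configuration (the table name mirrors the test):

```sql
-- Runs against a server configured with the s3 storage policy above.
-- s3_check_objects_after_upload = 1 makes the server verify, via an extra
-- request to object storage, that every uploaded blob actually exists
-- before the INSERT is acknowledged.
CREATE TABLE s3_failover_test
(
    id Int64,
    data String
)
ENGINE = MergeTree()
ORDER BY id;

INSERT INTO s3_failover_test VALUES (1, 'Hello');

-- The row reads back, and the server log contains the
-- "exists after upload" confirmation the test asserts on.
SELECT * FROM s3_failover_test ORDER BY id;
```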
diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py new file mode 100644 index 00000000000..adb56b1899c --- /dev/null +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 + +import logging +import os +import time + + +from helpers.cluster import ClickHouseCluster +import pytest + + +@pytest.fixture(scope="module") +def cluster(): + try: + cluster = ClickHouseCluster(__file__) + cluster.add_instance( + "node", + main_configs=[ + "configs/storage_conf.xml", + ], + user_configs=[ + "configs/setting.xml", + ], + with_minio=True, + ) + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + cluster.shutdown() + + +def test_paranoid_check_in_logs(cluster): + node = cluster.instances["node"] + + node.query( + """ + CREATE TABLE s3_failover_test ( + id Int64, + data String + ) ENGINE=MergeTree() + ORDER BY id + """ + ) + + node.query("INSERT INTO s3_failover_test VALUES (1, 'Hello')") + + assert node.contains_in_log("exists after upload") + + assert node.query("SELECT * FROM s3_failover_test ORDER BY id") == "1\tHello\n" diff --git a/tests/integration/test_concurrent_backups_s3/test.py b/tests/integration/test_concurrent_backups_s3/test.py index d3ca1fd35a7..81ac293acf9 100644 --- a/tests/integration/test_concurrent_backups_s3/test.py +++ b/tests/integration/test_concurrent_backups_s3/test.py @@ -24,6 +24,7 @@ def start_cluster(): cluster.shutdown() +@pytest.mark.skip(reason="Too flaky :(") def test_concurrent_backups(start_cluster): node.query("DROP TABLE IF EXISTS s3_test NO DELAY") columns = [f"column_{i} UInt64" for i in range(1000)] diff --git a/tests/integration/test_concurrent_threads_soft_limit/configs/config_defined_1.xml b/tests/integration/test_concurrent_threads_soft_limit/configs/config_defined_1.xml index 026563ecd53..5f93f2dc319 100644 --- a/tests/integration/test_concurrent_threads_soft_limit/configs/config_defined_1.xml +++ b/tests/integration/test_concurrent_threads_soft_limit/configs/config_defined_1.xml @@ -1,6 +1,6 @@
 <clickhouse>
-    <concurrent_threads_soft_limit>1</concurrent_threads_soft_limit>
+    <concurrent_threads_soft_limit_num>1</concurrent_threads_soft_limit_num>
     <query_log>
         <database>system</database>
         <table>query_log</table>
     </query_log>
 </clickhouse>
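The `concurrent_threads_soft_limit` configs here and below cap the server-wide pool of query threads at 1, 50, and 10 threads respectively. A hedged sketch of how such a limit can be observed from SQL; the exact queries in the integration test may differ, but `thread_ids` in `system.query_log` records the threads a query actually used:

```sql
-- Ask for 16 threads; with concurrent_threads_soft_limit_num = 1 the server
-- may grant far fewer than requested.
SELECT count() FROM numbers_mt(10000000) SETTINGS max_threads = 16;

SYSTEM FLUSH LOGS;

-- Inspect how many threads the query really got.
SELECT length(thread_ids)
FROM system.query_log
WHERE type = 'QueryFinish'
  AND query LIKE '%numbers_mt(10000000)%'
ORDER BY event_time DESC
LIMIT 1;
```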
diff --git a/tests/integration/test_concurrent_threads_soft_limit/configs/config_defined_50.xml b/tests/integration/test_concurrent_threads_soft_limit/configs/config_defined_50.xml index 55f1bf32bf6..f3b8c6a03b4 100644 --- a/tests/integration/test_concurrent_threads_soft_limit/configs/config_defined_50.xml +++ b/tests/integration/test_concurrent_threads_soft_limit/configs/config_defined_50.xml @@ -1,6 +1,6 @@
 <clickhouse>
-    <concurrent_threads_soft_limit>50</concurrent_threads_soft_limit>
+    <concurrent_threads_soft_limit_num>50</concurrent_threads_soft_limit_num>
     <query_log>
         <database>system</database>
         <table>query_log</table>
     </query_log>
 </clickhouse>
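The same `Documentation` mechanism shown in the `TableFunctionFactory` changes above feeds the new `description` columns of `system.functions` and `system.table_functions`. An illustrative check from SQL (output varies by build; these columns were added in this diff):

```sql
-- Table functions registered with documentation now expose it directly.
SELECT name, description
FROM system.table_functions
WHERE description != ''
ORDER BY name;

-- For regular functions, aliases intentionally get an empty description
-- (fillRow() calls insertDefault() for them).
SELECT name, alias_to, description
FROM system.functions
WHERE origin = 'System' AND alias_to != ''
LIMIT 10;
```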
diff --git a/tests/integration/test_concurrent_threads_soft_limit/configs/config_limit_reached.xml b/tests/integration/test_concurrent_threads_soft_limit/configs/config_limit_reached.xml index c7d86765212..77303cb0c35 100644 --- a/tests/integration/test_concurrent_threads_soft_limit/configs/config_limit_reached.xml +++ b/tests/integration/test_concurrent_threads_soft_limit/configs/config_limit_reached.xml @@ -1,6 +1,6 @@
 <clickhouse>
-    <concurrent_threads_soft_limit>10</concurrent_threads_soft_limit>
+    <concurrent_threads_soft_limit_num>10</concurrent_threads_soft_limit_num>
     <query_log>
         <database>system</database>
         <table>query_log</table>
     </query_log>
 </clickhouse>
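The `StorageSystemDisks` changes earlier in this diff split the old `type` value into a data-source type plus an `is_encrypted` flag, which is what the updated `test_disk_types` below asserts. An illustrative query over the new column set:

```sql
-- An encrypted disk wrapping S3 now reports the underlying source:
-- type = 's3' with is_encrypted = 1, instead of type = 'encrypted'.
SELECT name, type, is_encrypted
FROM system.disks
ORDER BY name;
```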
diff --git a/tests/integration/test_disk_types/test.py b/tests/integration/test_disk_types/test.py index a26f80165e8..5f7b430d7ef 100644 --- a/tests/integration/test_disk_types/test.py +++ b/tests/integration/test_disk_types/test.py @@ -6,7 +6,7 @@ disk_types = { "disk_s3": "s3", "disk_memory": "memory", "disk_hdfs": "hdfs", - "disk_encrypted": "encrypted", + "disk_encrypted": "s3", } @@ -34,14 +34,30 @@ def test_different_types(cluster): if disk == "": # skip empty line (after split at last position) continue fields = disk.split("\t") - assert len(fields) >= 6 + assert len(fields) >= 7 assert disk_types.get(fields[0], "UNKNOWN") == fields[5] + if "encrypted" in fields[0]: + assert fields[6] == "1" + else: + assert fields[6] == "0" def test_select_by_type(cluster): node = cluster.instances["node"] for name, disk_type in list(disk_types.items()): - assert ( - node.query("SELECT name FROM system.disks WHERE type='" + disk_type + "'") - == name + "\n" - ) + if disk_type != "s3": + assert ( + node.query( + "SELECT name FROM system.disks WHERE type='" + disk_type + "'" + ) + == name + "\n" + ) + else: + assert ( + node.query( + "SELECT name FROM system.disks WHERE type='" + + disk_type + + "' ORDER BY name" + ) + == "disk_encrypted\ndisk_s3\n" + ) diff --git a/tests/integration/test_file_schema_inference_cache/test.py b/tests/integration/test_file_schema_inference_cache/test.py index 30c9a788d6f..b8f6ac51186 100755 --- a/tests/integration/test_file_schema_inference_cache/test.py +++ b/tests/integration/test_file_schema_inference_cache/test.py @@ -27,7 +27,7 @@ def get_profile_event_for_query(node, query, profile_event): query = query.replace("'", "\\'") return int( node.query( - f"select ProfileEvents['{profile_event}'] from system.query_log where query='{query}' and type = 'QueryFinish' order by event_time desc limit 1" + f"select ProfileEvents['{profile_event}'] from system.query_log where query='{query}' and type = 'QueryFinish' order by query_start_time_microseconds desc limit 1" ) ) diff --git a/tests/integration/test_keeper_force_recovery/test.py b/tests/integration/test_keeper_force_recovery/test.py index 3109562f1c3..f3bb0ca56e3 100644 --- a/tests/integration/test_keeper_force_recovery/test.py +++ b/tests/integration/test_keeper_force_recovery/test.py @@ -54,6 +54,7 @@ def get_fake_zk(nodename, timeout=30.0): _fake_zk_instance = KazooClient( hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout, + connection_retry=KazooRetry(max_tries=10), command_retry=KazooRetry(max_tries=10), ) @@ -93,9 +94,9 @@ def wait_nodes(nodes): def wait_and_assert_data(zk, path, data): - while zk.exists(path) is None: + while zk.retry(zk.exists, path) is None: time.sleep(0.1) - assert zk.get(path)[0] == data.encode() + assert zk.retry(zk.get, path)[0] == data.encode() def close_zk(zk): diff --git a/tests/integration/test_mutations_with_merge_tree/test.py b/tests/integration/test_mutations_with_merge_tree/test.py index 7831cde7dea..e7d7abc8c7c 100644 --- a/tests/integration/test_mutations_with_merge_tree/test.py +++ b/tests/integration/test_mutations_with_merge_tree/test.py @@ -27,6 +27,7 @@ def started_cluster(): cluster.shutdown() +@pytest.mark.skip(reason="Skipping mutations in partition does not work") def test_mutations_in_partition_background(started_cluster): try: numbers = 100 @@ -79,6 +80,7 @@ def test_mutations_in_partition_background(started_cluster): instance_test_mutations.query(f"""DROP TABLE {name}""") +@pytest.mark.skip(reason="Skipping mutations in partition does not work") 
@pytest.mark.parametrize("sync", [("last",), ("all",)]) def test_mutations_in_partition_sync(started_cluster, sync): try: @@ -190,6 +192,7 @@ def test_mutations_with_truncate_table(started_cluster): ) +@pytest.mark.skip(reason="Skipping mutations in partition does not work") def test_mutations_will_not_hang_for_non_existing_parts_sync(started_cluster): try: numbers = 100 @@ -227,6 +230,7 @@ def test_mutations_will_not_hang_for_non_existing_parts_sync(started_cluster): instance_test_mutations.query(f"""DROP TABLE {name}""") +@pytest.mark.skip(reason="Skipping mutations in partition does not work") def test_mutations_will_not_hang_for_non_existing_parts_async(started_cluster): try: numbers = 100 diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py index 6e61675563f..1338b0b2378 100644 --- a/tests/integration/test_mysql_protocol/test.py +++ b/tests/integration/test_mysql_protocol/test.py @@ -365,7 +365,7 @@ def test_mysql_replacement_query(started_cluster): demux=True, ) assert code == 0 - assert stdout.decode() == "DATABASE()\ndefault\n" + assert stdout.decode() == "currentDatabase()\ndefault\n" code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( """ @@ -377,7 +377,7 @@ def test_mysql_replacement_query(started_cluster): demux=True, ) assert code == 0 - assert stdout.decode() == "DATABASE()\ndefault\n" + assert stdout.decode() == "currentDatabase()\ndefault\n" def test_mysql_select_user(started_cluster): diff --git a/tests/integration/test_nlp/test.py b/tests/integration/test_nlp/test.py deleted file mode 100644 index e15c9ecfaa6..00000000000 --- a/tests/integration/test_nlp/test.py +++ /dev/null @@ -1,149 +0,0 @@ -import os -import sys - -import pytest - -sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) - -from helpers.cluster import ClickHouseCluster - - -cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance("instance", main_configs=["configs/dicts_config.xml"]) - - -def copy_file_to_container(local_path, dist_path, container_id): - os.system( - "docker cp {local} {cont_id}:{dist}".format( - local=local_path, cont_id=container_id, dist=dist_path - ) - ) - - -@pytest.fixture(scope="module") -def start_cluster(): - try: - cluster.start() - - copy_file_to_container( - os.path.join(SCRIPT_DIR, "dictionaries/."), - "/etc/clickhouse-server/dictionaries", - instance.docker_id, - ) - - yield cluster - finally: - cluster.shutdown() - - -def test_lemmatize(start_cluster): - assert ( - instance.query( - "SELECT lemmatize('en', 'wolves')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "wolf\n" - ) - assert ( - instance.query( - "SELECT lemmatize('en', 'dogs')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "dog\n" - ) - assert ( - instance.query( - "SELECT lemmatize('en', 'looking')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "look\n" - ) - assert ( - instance.query( - "SELECT lemmatize('en', 'took')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "take\n" - ) - assert ( - instance.query( - "SELECT lemmatize('en', 'imported')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "import\n" - ) - assert ( - instance.query( - "SELECT lemmatize('en', 'tokenized')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "tokenize\n" - ) - assert ( - instance.query( - "SELECT lemmatize('en', 'flown')", - settings={"allow_experimental_nlp_functions": 
1}, - ) - == "fly\n" - ) - - -def test_synonyms_extensions(start_cluster): - assert ( - instance.query( - "SELECT synonyms('en', 'crucial')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "['important','big','critical','crucial','essential']\n" - ) - assert ( - instance.query( - "SELECT synonyms('en', 'cheerful')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "['happy','cheerful','delighted','ecstatic']\n" - ) - assert ( - instance.query( - "SELECT synonyms('en', 'yet')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "['however','nonetheless','but','yet']\n" - ) - assert ( - instance.query( - "SELECT synonyms('en', 'quiz')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "['quiz','query','check','exam']\n" - ) - - assert ( - instance.query( - "SELECT synonyms('ru', 'главный')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "['важный','большой','высокий','хороший','главный']\n" - ) - assert ( - instance.query( - "SELECT synonyms('ru', 'веселый')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "['веселый','счастливый','живой','яркий','смешной']\n" - ) - assert ( - instance.query( - "SELECT synonyms('ru', 'правда')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "['хотя','однако','но','правда']\n" - ) - assert ( - instance.query( - "SELECT synonyms('ru', 'экзамен')", - settings={"allow_experimental_nlp_functions": 1}, - ) - == "['экзамен','испытание','проверка']\n" - ) diff --git a/tests/integration/test_no_local_metadata_node/test.py b/tests/integration/test_no_local_metadata_node/test.py deleted file mode 100644 index a4f04035a11..00000000000 --- a/tests/integration/test_no_local_metadata_node/test.py +++ /dev/null @@ -1,56 +0,0 @@ -import pytest - -from helpers.cluster import ClickHouseCluster - -cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance("node1", with_zookeeper=True) - - -@pytest.fixture(scope="module") -def start_cluster(): - try: - cluster.start() - - yield cluster - finally: - cluster.shutdown() - - -def test_table_start_without_metadata(start_cluster): - node1.query( - """ - CREATE TABLE test (date Date) - ENGINE = ReplicatedMergeTree('/clickhouse/table/test_table', '1') - ORDER BY tuple() - """ - ) - - node1.query("INSERT INTO test VALUES(toDate('2019-12-01'))") - - assert node1.query("SELECT date FROM test") == "2019-12-01\n" - - # some fake alter - node1.query("ALTER TABLE test MODIFY COLUMN date Date DEFAULT toDate('2019-10-01')") - - assert node1.query("SELECT date FROM test") == "2019-12-01\n" - - node1.query("DETACH TABLE test") - zk_cli = cluster.get_kazoo_client("zoo1") - - # simulate update from old version - zk_cli.delete("/clickhouse/table/test_table/replicas/1/metadata") - zk_cli.delete("/clickhouse/table/test_table/replicas/1/metadata_version") - - node1.query("ATTACH TABLE test") - - assert node1.query("SELECT date FROM test") == "2019-12-01\n" - - node1.query("ALTER TABLE test MODIFY COLUMN date Date DEFAULT toDate('2019-09-01')") - - node1.query("DETACH TABLE test") - - zk_cli.set("/clickhouse/table/test_table/replicas/1/metadata", b"") - - node1.query("ATTACH TABLE test") - - assert node1.query("SELECT date FROM test") == "2019-12-01\n" diff --git a/tests/integration/test_replicated_database/configs/config.xml b/tests/integration/test_replicated_database/configs/config.xml index 9d217943bd7..16cd942e975 100644 --- a/tests/integration/test_replicated_database/configs/config.xml +++ 
b/tests/integration/test_replicated_database/configs/config.xml @@ -1,3 +1,6 @@ 10 + + 10 + diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py index f716fac8508..0cf237d57f3 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -788,23 +788,24 @@ def test_startup_without_zk(started_cluster): main_node.query( "CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');" ) - # main_node.query("CREATE TABLE startup.rmt (n int) ENGINE=ReplicatedMergeTree order by n") - main_node.query("CREATE TABLE startup.rmt (n int) ENGINE=MergeTree order by n") + main_node.query( + "CREATE TABLE startup.rmt (n int) ENGINE=ReplicatedMergeTree order by n" + ) + main_node.query("INSERT INTO startup.rmt VALUES (42)") with PartitionManager() as pm: pm.drop_instance_zk_connections(main_node) - main_node.restart_clickhouse(stop_start_wait_sec=30) + main_node.restart_clickhouse(stop_start_wait_sec=60) assert main_node.query("SELECT (*,).1 FROM startup.rmt") == "42\n" - for _ in range(10): - try: - main_node.query("CREATE TABLE startup.m (n int) ENGINE=Memory") - break - except: - time.sleep(1) + # we need to wait until the table is not readonly + main_node.query_with_retry("INSERT INTO startup.rmt VALUES(42)") + + main_node.query_with_retry("CREATE TABLE startup.m (n int) ENGINE=Memory") main_node.query("EXCHANGE TABLES startup.rmt AND startup.m") assert main_node.query("SELECT (*,).1 FROM startup.m") == "42\n" + main_node.query("DROP DATABASE startup SYNC") diff --git a/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml index 92706ff445d..00aa03b1a92 100644 --- a/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml +++ b/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml @@ -7,6 +7,12 @@ minio minio123 + + cache + s3 + ./s3_cache/ + 1000000 +
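Aside: the hunk above layers a `cache`-type disk over the `s3` disk (path `./s3_cache/`, 1000000-byte cap) for `test_replicated_merge_tree_s3`. As an illustrative sketch (not part of this changeset), assuming a started instance `node` from `helpers.cluster` and the `s3_cache` disk name this test uses, registration of the disk could be checked through the standard `system.disks` table:

```python
def assert_cache_disk_registered(node):
    # system.disks lists every disk declared in storage_configuration;
    # the cache disk should appear once the server has loaded the config.
    names = node.query("SELECT name FROM system.disks").split()
    assert "s3_cache" in names
```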
@@ -16,6 +22,13 @@
+            <s3_cache>
+                <volumes>
+                    <main>
+                        <disk>s3_cache</disk>
+                    </main>
+                </volumes>
+            </s3_cache>
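For the policy half of the config, a similar hedged sketch, again assuming the `node` fixture; `system.storage_policies` is a standard system table listing each policy with the disks it is composed of:

```python
def assert_cache_policy_registered(node):
    # Per the hunk above, the s3_cache policy should expose the
    # s3_cache disk through its single `main` volume.
    res = node.query(
        "SELECT policy_name, disks FROM system.storage_policies "
        "WHERE policy_name = 's3_cache'"
    )
    assert "s3_cache" in res
```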
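The `create_table` helper in the test file directly below builds its `SETTINGS` clause by joining a settings dict with `repr()`. A standalone sketch of that trick with hypothetical values: `repr` single-quotes Python strings exactly the way ClickHouse SQL expects and leaves integers bare.

```python
def settings_clause(settings):
    # repr('s3_cache') -> "'s3_cache'", repr(0) -> '0', so the joined
    # string drops straight into CREATE TABLE ... SETTINGS.
    return ",".join(f"{k}={v!r}" for k, v in settings.items())

assert (
    settings_clause({"storage_policy": "s3_cache", "min_rows_for_wide_part": 0})
    == "storage_policy='s3_cache',min_rows_for_wide_part=0"
)
```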
diff --git a/tests/integration/test_replicated_merge_tree_s3/test.py b/tests/integration/test_replicated_merge_tree_s3/test.py index cc85a4eab02..37027d07969 100644 --- a/tests/integration/test_replicated_merge_tree_s3/test.py +++ b/tests/integration/test_replicated_merge_tree_s3/test.py @@ -5,6 +5,8 @@ import string import pytest from helpers.cluster import ClickHouseCluster +TABLE_NAME = "s3_test" + @pytest.fixture(scope="module") def cluster(): @@ -58,8 +60,13 @@ def generate_values(date_str, count, sign=1): def create_table(cluster, additional_settings=None): - create_table_statement = """ - CREATE TABLE s3_test ON CLUSTER cluster( + settings = { + "storage_policy": "s3", + } + settings.update(additional_settings) + + create_table_statement = f""" + CREATE TABLE {TABLE_NAME} ON CLUSTER cluster( dt Date, id Int64, data String, @@ -67,20 +74,42 @@ def create_table(cluster, additional_settings=None): ) ENGINE=ReplicatedMergeTree() PARTITION BY dt ORDER BY (dt, id) - SETTINGS storage_policy='s3' + SETTINGS {",".join((k+"="+repr(v) for k, v in settings.items()))} """ - if additional_settings: - create_table_statement += "," - create_table_statement += additional_settings list(cluster.instances.values())[0].query(create_table_statement) +def insert(cluster, node_idxs, verify=True): + all_values = "" + for node_idx in node_idxs: + node = cluster.instances["node" + str(node_idx)] + values = generate_values("2020-01-0" + str(node_idx), 4096) + node.query( + f"INSERT INTO {TABLE_NAME} VALUES {values}", + settings={"insert_quorum": 3}, + ) + if node_idx != 1: + all_values += "," + all_values += values + + if verify: + for node_idx in node_idxs: + node = cluster.instances["node" + str(node_idx)] + assert ( + node.query( + f"SELECT * FROM {TABLE_NAME} order by dt, id FORMAT Values", + settings={"select_sequential_consistency": 1}, + ) + == all_values + ) + + @pytest.fixture(autouse=True) def drop_table(cluster): yield for node in list(cluster.instances.values()): - node.query("DROP TABLE IF EXISTS s3_test") + node.query(f"DROP TABLE IF EXISTS {TABLE_NAME}") minio = cluster.minio_client # Remove extra objects to prevent tests cascade failing @@ -95,32 +124,39 @@ def drop_table(cluster): def test_insert_select_replicated(cluster, min_rows_for_wide_part, files_per_part): create_table( cluster, - additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part), + additional_settings={"min_rows_for_wide_part": min_rows_for_wide_part}, ) - all_values = "" - for node_idx in range(1, 4): - node = cluster.instances["node" + str(node_idx)] - values = generate_values("2020-01-0" + str(node_idx), 4096) - node.query( - "INSERT INTO s3_test VALUES {}".format(values), - settings={"insert_quorum": 3}, - ) - if node_idx != 1: - all_values += "," - all_values += values - - for node_idx in range(1, 4): - node = cluster.instances["node" + str(node_idx)] - assert ( - node.query( - "SELECT * FROM s3_test order by dt, id FORMAT Values", - settings={"select_sequential_consistency": 1}, - ) - == all_values - ) + insert(cluster, node_idxs=[1, 2, 3], verify=True) minio = cluster.minio_client assert len(list(minio.list_objects(cluster.minio_bucket, "data/"))) == 3 * ( FILES_OVERHEAD + files_per_part * 3 ) + + +def test_drop_cache_on_cluster(cluster): + create_table( + cluster, + additional_settings={"storage_policy": "s3_cache"}, + ) + + insert(cluster, node_idxs=[1, 2, 3], verify=True) + + node1 = cluster.instances["node1"] + node2 = cluster.instances["node2"] + node3 = cluster.instances["node3"] + + 
node1.query( + f"select * from clusterAllReplicas(cluster, default, {TABLE_NAME}) format Null" + ) + + assert int(node1.query("select count() from system.filesystem_cache")) > 0 + assert int(node2.query("select count() from system.filesystem_cache")) > 0 + assert int(node3.query("select count() from system.filesystem_cache")) > 0 + + node1.query("system drop filesystem cache on cluster cluster") + + assert int(node1.query("select count() from system.filesystem_cache")) == 0 + assert int(node2.query("select count() from system.filesystem_cache")) == 0 + assert int(node3.query("select count() from system.filesystem_cache")) == 0 diff --git a/tests/integration/test_rocksdb_options/test.py b/tests/integration/test_rocksdb_options/test.py index a00d3528eed..a9e12eae4fd 100644 --- a/tests/integration/test_rocksdb_options/test.py +++ b/tests/integration/test_rocksdb_options/test.py @@ -30,6 +30,18 @@ def test_valid_options(start_cluster): DROP TABLE test; """ ) + node.query( + """ + CREATE TABLE test (key UInt64, value String) Engine=EmbeddedRocksDB(0) PRIMARY KEY(key); + DROP TABLE test; + """ + ) + node.query( + """ + CREATE TABLE test (key UInt64, value String) Engine=EmbeddedRocksDB(10) PRIMARY KEY(key); + DROP TABLE test; + """ + ) def test_invalid_options(start_cluster): diff --git a/tests/integration/test_s3_aws_sdk_is_total_garbage/__init__.py b/tests/integration/test_s3_aws_sdk_is_total_garbage/__init__.py new file mode 100644 index 00000000000..e5a0d9b4834 --- /dev/null +++ b/tests/integration/test_s3_aws_sdk_is_total_garbage/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/env python3 diff --git a/tests/integration/test_s3_aws_sdk_is_total_garbage/configs/storage_conf.xml b/tests/integration/test_s3_aws_sdk_is_total_garbage/configs/storage_conf.xml new file mode 100644 index 00000000000..4c5e33a7998 --- /dev/null +++ b/tests/integration/test_s3_aws_sdk_is_total_garbage/configs/storage_conf.xml @@ -0,0 +1,35 @@ + + + test + + + + + + s3 + + http://resolver:8080/root/data/ + minio + minio123 + + true + + 0 + true + + + + + + +
+                        <disk>s3</disk>
+                    </main>
+                </volumes>
+            </s3>
+        </policies>
+    </storage_configuration>
+    <merge_tree>
+        <storage_policy>s3</storage_policy>
+    </merge_tree>
+</clickhouse>
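Note that the disk endpoint above points at `http://resolver:8080/` (the proxy container) rather than at MinIO directly. A minimal readiness probe in the spirit of the `run_endpoint` helper further below, assuming the same convention that the endpoint answers `OK` on `/` (standard library only):

```python
import time
import urllib.request


def wait_for_proxy(url="http://resolver:8080/", attempts=100):
    # Poll until the mock S3 endpoint responds "OK" on "/",
    # mirroring the curl loop run_endpoint uses below.
    for _ in range(attempts):
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                if resp.read().decode() == "OK":
                    return True
        except OSError:
            pass
        time.sleep(1)
    return False
```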
diff --git a/tests/integration/test_s3_aws_sdk_is_total_garbage/configs/upload_min_size.xml b/tests/integration/test_s3_aws_sdk_is_total_garbage/configs/upload_min_size.xml new file mode 100644 index 00000000000..801cec16039 --- /dev/null +++ b/tests/integration/test_s3_aws_sdk_is_total_garbage/configs/upload_min_size.xml @@ -0,0 +1,22 @@ + + + + + 1 + 1 + + + + + + + + ::/0 + + default + default + + + + + diff --git a/tests/integration/test_s3_aws_sdk_is_total_garbage/s3_endpoint/endpoint.py b/tests/integration/test_s3_aws_sdk_is_total_garbage/s3_endpoint/endpoint.py new file mode 100644 index 00000000000..d6a732cc681 --- /dev/null +++ b/tests/integration/test_s3_aws_sdk_is_total_garbage/s3_endpoint/endpoint.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +from bottle import request, route, run, response + +# Handle for MultipleObjectsDelete. +@route("/<_bucket>", ["POST"]) +def delete(_bucket): + response.set_header( + "Location", "http://minio1:9001/" + _bucket + "?" + request.query_string + ) + response.status = 307 + return "Redirected" + + +@route("/<_bucket>/<_path:path>", ["GET", "POST", "PUT", "DELETE"]) +def server(_bucket, _path): + # CompleteMultipartUpload request + # We always returning 200 + error in body to simulate: https://aws.amazon.com/premiumsupport/knowledge-center/s3-resolve-200-internalerror/ + if request.query_string.startswith("uploadId="): + response.status = 200 + response.content_type = "text/xml" + return 'InternalErrorWe encountered an internal error. Please try again.txfbd566d03042474888193-00608d7538' + + response.set_header( + "Location", + "http://minio1:9001/" + _bucket + "/" + _path + "?" + request.query_string, + ) + response.status = 307 + return "Redirected" + + +@route("/") +def ping(): + return "OK" + + +run(host="0.0.0.0", port=8080) diff --git a/tests/integration/test_s3_aws_sdk_is_total_garbage/test.py b/tests/integration/test_s3_aws_sdk_is_total_garbage/test.py new file mode 100644 index 00000000000..c70d7c03a1d --- /dev/null +++ b/tests/integration/test_s3_aws_sdk_is_total_garbage/test.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 + +import logging +import os +import time + + +import pytest + +from helpers.client import QueryRuntimeException +from helpers.cluster import ClickHouseCluster + + +# Runs custom python-based S3 endpoint. 
+def run_endpoint(cluster): + logging.info("Starting custom S3 endpoint") + container_id = cluster.get_container_id("resolver") + current_dir = os.path.dirname(__file__) + cluster.copy_file_to_container( + container_id, + os.path.join(current_dir, "s3_endpoint", "endpoint.py"), + "endpoint.py", + ) + cluster.exec_in_container(container_id, ["python", "endpoint.py"], detach=True) + + # Wait for S3 endpoint start + num_attempts = 100 + for attempt in range(num_attempts): + ping_response = cluster.exec_in_container( + cluster.get_container_id("resolver"), + ["curl", "-s", "http://resolver:8080/"], + nothrow=True, + ) + if ping_response != "OK": + if attempt == num_attempts - 1: + assert ping_response == "OK", 'Expected "OK", but got "{}"'.format( + ping_response + ) + else: + time.sleep(1) + else: + break + + logging.info("S3 endpoint started") + + +@pytest.fixture(scope="module") +def cluster(): + try: + cluster = ClickHouseCluster(__file__) + cluster.add_instance( + "node", + main_configs=[ + "configs/storage_conf.xml", + ], + user_configs=[ + "configs/upload_min_size.xml", + ], + with_minio=True, + ) + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + run_endpoint(cluster) + + yield cluster + finally: + cluster.shutdown() + + +def test_dataloss(cluster): + node = cluster.instances["node"] + + node.query( + """ + CREATE TABLE s3_failover_test ( + id Int64, + data String + ) ENGINE=MergeTree() + ORDER BY id + """ + ) + + # Must throw an exception because we use proxy which always fail + # CompleteMultipartUpload requests + with pytest.raises(Exception): + node.query("INSERT INTO s3_failover_test VALUES (1, 'Hello')") diff --git a/tests/integration/test_s3_cluster/configs/cluster.xml b/tests/integration/test_s3_cluster/configs/cluster.xml index 39275e99abd..18f15763633 100644 --- a/tests/integration/test_s3_cluster/configs/cluster.xml +++ b/tests/integration/test_s3_cluster/configs/cluster.xml @@ -20,23 +20,8 @@ - - - - - - s0_0_0 - 9000 - - - s0_0_1 - 9000 - - - - cluster_simple - + \ No newline at end of file diff --git a/tests/integration/test_s3_cluster/test.py b/tests/integration/test_s3_cluster/test.py index 2384aa6e059..2cbb36fcf06 100644 --- a/tests/integration/test_s3_cluster/test.py +++ b/tests/integration/test_s3_cluster/test.py @@ -34,24 +34,10 @@ def started_cluster(): try: cluster = ClickHouseCluster(__file__) cluster.add_instance( - "s0_0_0", - main_configs=["configs/cluster.xml"], - macros={"replica": "node1", "shard": "shard1"}, - with_minio=True, - with_zookeeper=True, - ) - cluster.add_instance( - "s0_0_1", - main_configs=["configs/cluster.xml"], - macros={"replica": "replica2", "shard": "shard1"}, - with_zookeeper=True, - ) - cluster.add_instance( - "s0_1_0", - main_configs=["configs/cluster.xml"], - macros={"replica": "replica1", "shard": "shard2"}, - with_zookeeper=True, + "s0_0_0", main_configs=["configs/cluster.xml"], with_minio=True ) + cluster.add_instance("s0_0_1", main_configs=["configs/cluster.xml"]) + cluster.add_instance("s0_1_0", main_configs=["configs/cluster.xml"]) logging.info("Starting cluster...") cluster.start() @@ -69,17 +55,17 @@ def test_select_all(started_cluster): pure_s3 = node.query( """ SELECT * from s3( - 'http://minio1:9001/root/data/{clickhouse,database}/*', - 'minio', 'minio123', 'CSV', - 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') + 'http://minio1:9001/root/data/{clickhouse,database}/*', + 'minio', 'minio123', 'CSV', + 'name String, value UInt32, polygon 
Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon)""" ) # print(pure_s3) s3_distibuted = node.query( """ SELECT * from s3Cluster( - 'cluster_simple', - 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', + 'cluster_simple', + 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon)""" ) # print(s3_distibuted) @@ -92,15 +78,15 @@ def test_count(started_cluster): pure_s3 = node.query( """ SELECT count(*) from s3( - 'http://minio1:9001/root/data/{clickhouse,database}/*', - 'minio', 'minio123', 'CSV', + 'http://minio1:9001/root/data/{clickhouse,database}/*', + 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')""" ) # print(pure_s3) s3_distibuted = node.query( """ SELECT count(*) from s3Cluster( - 'cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', + 'cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')""" ) @@ -139,13 +125,13 @@ def test_union_all(started_cluster): SELECT * FROM ( SELECT * from s3( - 'http://minio1:9001/root/data/{clickhouse,database}/*', - 'minio', 'minio123', 'CSV', - 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') + 'http://minio1:9001/root/data/{clickhouse,database}/*', + 'minio', 'minio123', 'CSV', + 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') UNION ALL SELECT * from s3( - 'http://minio1:9001/root/data/{clickhouse,database}/*', - 'minio', 'minio123', 'CSV', + 'http://minio1:9001/root/data/{clickhouse,database}/*', + 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ) ORDER BY (name, value, polygon) @@ -157,13 +143,13 @@ def test_union_all(started_cluster): SELECT * FROM ( SELECT * from s3Cluster( - 'cluster_simple', - 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', + 'cluster_simple', + 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') UNION ALL SELECT * from s3Cluster( - 'cluster_simple', - 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', + 'cluster_simple', + 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ) ORDER BY (name, value, polygon) @@ -180,12 +166,12 @@ def test_wrong_cluster(started_cluster): """ SELECT count(*) from s3Cluster( 'non_existent_cluster', - 'http://minio1:9001/root/data/{clickhouse,database}/*', + 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') UNION ALL SELECT count(*) from s3Cluster( 'non_existent_cluster', - 'http://minio1:9001/root/data/{clickhouse,database}/*', + 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') """ ) @@ -198,137 +184,14 @@ def test_ambiguous_join(started_cluster): result = node.query( """ SELECT l.name, r.value from s3Cluster( - 'cluster_simple', - 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 
'CSV', + 'cluster_simple', + 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') as l JOIN s3Cluster( - 'cluster_simple', - 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', + 'cluster_simple', + 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') as r ON l.name = r.name """ ) assert "AMBIGUOUS_COLUMN_NAME" not in result - - -def test_distributed_insert_select(started_cluster): - first_replica_first_shard = started_cluster.instances["s0_0_0"] - second_replica_first_shard = started_cluster.instances["s0_0_1"] - first_replica_second_shard = started_cluster.instances["s0_1_0"] - - first_replica_first_shard.query( - """ - CREATE TABLE insert_select_local ON CLUSTER 'cluster_simple' (a String, b UInt64) - ENGINE=ReplicatedMergeTree('/clickhouse/tables/{shard}/insert_select', '{replica}') - ORDER BY (a, b); - """ - ) - - first_replica_first_shard.query( - """ - CREATE TABLE insert_select_distributed ON CLUSTER 'cluster_simple' as insert_select_local - ENGINE = Distributed('cluster_simple', default, insert_select_local, b % 2); - """ - ) - - for file_number in range(100): - first_replica_first_shard.query( - """ - INSERT INTO TABLE FUNCTION s3('http://minio1:9001/root/data/generated/file_{}.csv', 'minio', 'minio123', 'CSV','a String, b UInt64') - SELECT repeat('{}', 10), number from numbers(100); - """.format( - file_number, file_number - ) - ) - - first_replica_first_shard.query( - """ - INSERT INTO insert_select_distributed SELECT * FROM s3Cluster( - 'cluster_simple', - 'http://minio1:9001/root/data/generated/*.csv', 'minio', 'minio123', 'CSV','a String, b UInt64' - ) SETTINGS parallel_distributed_insert_select=1; - """ - ) - - for line in ( - first_replica_first_shard.query("""SELECT * FROM insert_select_local;""") - .strip() - .split("\n") - ): - _, b = line.split() - assert int(b) % 2 == 0 - - for line in ( - second_replica_first_shard.query("""SELECT * FROM insert_select_local;""") - .strip() - .split("\n") - ): - _, b = line.split() - assert int(b) % 2 == 0 - - for line in ( - first_replica_second_shard.query("""SELECT * FROM insert_select_local;""") - .strip() - .split("\n") - ): - _, b = line.split() - assert int(b) % 2 == 1 - - -def test_distributed_insert_select_with_replicated(started_cluster): - first_replica_first_shard = started_cluster.instances["s0_0_0"] - second_replica_first_shard = started_cluster.instances["s0_0_1"] - - first_replica_first_shard.query( - """ - CREATE TABLE insert_select_replicated_local ON CLUSTER 'first_shard' (a String, b UInt64) - ENGINE=ReplicatedMergeTree('/clickhouse/tables/{shard}/insert_select_with_replicated', '{replica}') - ORDER BY (a, b); - """ - ) - - for replica in [first_replica_first_shard, second_replica_first_shard]: - replica.query( - """ - SYSTEM STOP FETCHES; - """ - ) - replica.query( - """ - SYSTEM STOP MERGES; - """ - ) - - for file_number in range(100): - first_replica_first_shard.query( - """ - INSERT INTO TABLE FUNCTION s3('http://minio1:9001/root/data/generated_replicated/file_{}.csv', 'minio', 'minio123', 'CSV','a String, b UInt64') - SELECT repeat('{}', 10), number from numbers(100); - """.format( - file_number, file_number - ) - ) - - first_replica_first_shard.query( - """ - INSERT INTO insert_select_replicated_local SELECT * FROM s3Cluster( - 'first_shard', - 
'http://minio1:9001/root/data/generated_replicated/*.csv', 'minio', 'minio123', 'CSV','a String, b UInt64' - ) SETTINGS parallel_distributed_insert_select=1; - """ - ) - - first = int( - first_replica_first_shard.query( - """SELECT count(*) FROM insert_select_replicated_local""" - ).strip() - ) - second = int( - second_replica_first_shard.query( - """SELECT count(*) FROM insert_select_replicated_local""" - ).strip() - ) - - assert first != 0 - assert second != 0 - assert first + second == 100 * 100 diff --git a/tests/integration/test_s3_table_functions/__init__.py b/tests/integration/test_s3_table_functions/__init__.py new file mode 100644 index 00000000000..e5a0d9b4834 --- /dev/null +++ b/tests/integration/test_s3_table_functions/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/env python3 diff --git a/tests/integration/test_s3_table_functions/configs/config.d/minio.xml b/tests/integration/test_s3_table_functions/configs/config.d/minio.xml new file mode 100644 index 00000000000..62fae02306d --- /dev/null +++ b/tests/integration/test_s3_table_functions/configs/config.d/minio.xml @@ -0,0 +1,12 @@ + + + + + + + http://minio1:9001/root/data/ + minio + minio123 + + + diff --git a/tests/integration/test_s3_table_functions/test.py b/tests/integration/test_s3_table_functions/test.py new file mode 100644 index 00000000000..516d6582990 --- /dev/null +++ b/tests/integration/test_s3_table_functions/test.py @@ -0,0 +1,97 @@ +import logging +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +from helpers.client import QueryRuntimeException + + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance( + "node", + main_configs=[ + "configs/config.d/minio.xml", + ], + with_minio=True, +) + +settings = { + "s3_max_connections": "1", + "max_insert_threads": "1", + "s3_truncate_on_insert": "1", + "s3_min_upload_part_size": "33554432", +} + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + logging.info("Stopping cluster") + cluster.shutdown() + logging.info("Cluster stopped") + + +def test_s3_table_functions(started_cluster): + """ + Simple test to check s3 table function functionalities + """ + node.query( + """ + INSERT INTO FUNCTION s3 + ( + nc_s3, + filename = 'test_file.tsv.gz', + format = 'TSV', + structure = 'number UInt64', + compression_method = 'gz' + ) + SELECT * FROM numbers(1000000) + """, + settings=settings, + ) + + assert ( + node.query( + """ + SELECT count(*) FROM s3 + ( + nc_s3, + filename = 'test_file.tsv.gz', + format = 'TSV', + structure = 'number UInt64', + compression_method = 'gz' + ); + """ + ) + == "1000000\n" + ) + + +def test_s3_table_functions_timeouts(started_cluster): + """ + Test with timeout limit of 1200ms. + This should raise an Exception and pass. 
+ """ + with PartitionManager() as pm: + pm.add_network_delay(node, 1200) + + with pytest.raises(QueryRuntimeException): + node.query( + """ + INSERT INTO FUNCTION s3 + ( + nc_s3, + filename = 'test_file.tsv.gz', + format = 'TSV', + structure = 'number UInt64', + compression_method = 'gz' + ) + SELECT * FROM numbers(1000000) + """, + settings=settings, + ) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 86fb5ab578c..34243e4b58d 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -636,7 +636,7 @@ def get_profile_event_for_query(node, query, profile_event): query = query.replace("'", "\\'") return int( node.query( - f"select ProfileEvents['{profile_event}'] from system.query_log where query='{query}' and type = 'QueryFinish' order by event_time desc limit 1" + f"select ProfileEvents['{profile_event}'] from system.query_log where query='{query}' and type = 'QueryFinish' order by query_start_time_microseconds desc limit 1" ) ) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 166ba773e27..46bf7b0b3a0 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -4102,6 +4102,31 @@ def test_issue26643(kafka_cluster): # kafka_cluster.open_bash_shell('instance') +def test_num_consumers_limit(kafka_cluster): + instance.query("DROP TABLE IF EXISTS test.kafka") + + error = instance.query_and_get_error( + """ + CREATE TABLE test.kafka (key UInt64, value UInt64) + ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n', '', 100) + SETTINGS kafka_commit_on_select = 1; + """ + ) + + assert "BAD_ARGUMENTS" in error + + instance.query( + """ + SET kafka_disable_num_consumers_limit = 1; + CREATE TABLE test.kafka (key UInt64, value UInt64) + ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n', '', 100) + SETTINGS kafka_commit_on_select = 1; + """ + ) + + instance.query("DROP TABLE test.kafka") + + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_meilisearch/test.py b/tests/integration/test_storage_meilisearch/test.py index b420c28a37e..fd11644cf31 100644 --- a/tests/integration/test_storage_meilisearch/test.py +++ b/tests/integration/test_storage_meilisearch/test.py @@ -651,19 +651,19 @@ def test_table_function(started_cluster): assert ( node.query( - "SELECT COUNT() FROM MeiliSearch('http://meili1:7700', 'new_table', '')" + "SELECT COUNT() FROM meilisearch('http://meili1:7700', 'new_table', '')" ) == "100\n" ) assert ( node.query( - "SELECT sum(id) FROM MeiliSearch('http://meili1:7700', 'new_table', '')" + "SELECT sum(id) FROM meilisearch('http://meili1:7700', 'new_table', '')" ) == str(sum(range(0, 100))) + "\n" ) assert ( node.query( - "SELECT data FROM MeiliSearch('http://meili1:7700', 'new_table', '') WHERE id = 42" + "SELECT data FROM meilisearch('http://meili1:7700', 'new_table', '') WHERE id = 42" ) == hex(42 * 42) + "\n" ) @@ -685,35 +685,35 @@ def test_table_function_secure(started_cluster): assert ( node.query( - "SELECT COUNT() FROM MeiliSearch('http://meili_secure:7700', 'new_table', 'password')" + "SELECT COUNT() FROM meilisearch('http://meili_secure:7700', 'new_table', 'password')" ) == "100\n" ) assert ( node.query( - "SELECT sum(id) FROM 
MeiliSearch('http://meili_secure:7700', 'new_table', 'password')" + "SELECT sum(id) FROM meilisearch('http://meili_secure:7700', 'new_table', 'password')" ) == str(sum(range(0, 100))) + "\n" ) assert ( node.query( - "SELECT data FROM MeiliSearch('http://meili_secure:7700', 'new_table', 'password') WHERE id = 42" + "SELECT data FROM meilisearch('http://meili_secure:7700', 'new_table', 'password') WHERE id = 42" ) == hex(42 * 42) + "\n" ) error = node.query_and_get_error( - "SELECT COUNT() FROM MeiliSearch('http://meili_secure:7700', 'new_table', 'wrong_password')" + "SELECT COUNT() FROM meilisearch('http://meili_secure:7700', 'new_table', 'wrong_password')" ) assert "MEILISEARCH_EXCEPTION" in error error = node.query_and_get_error( - "SELECT sum(id) FROM MeiliSearch('http://meili_secure:7700', 'new_table', 'wrong_password')" + "SELECT sum(id) FROM meilisearch('http://meili_secure:7700', 'new_table', 'wrong_password')" ) assert "MEILISEARCH_EXCEPTION" in error error = node.query_and_get_error( - "SELECT data FROM MeiliSearch('http://meili_secure:7700', 'new_table', 'wrong_password') WHERE id = 42" + "SELECT data FROM meilisearch('http://meili_secure:7700', 'new_table', 'wrong_password') WHERE id = 42" ) assert "MEILISEARCH_EXCEPTION" in error @@ -751,103 +751,103 @@ def test_types_in_table_function(started_cluster): assert ( node.query( - "SELECT id FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT id FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "1\n" ) assert ( node.query( - "SELECT UInt8_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT UInt8_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "128\n" ) assert ( node.query( - "SELECT UInt16_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT UInt16_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "32768\n" ) assert ( node.query( - "SELECT UInt32_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT UInt32_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "2147483648\n" ) assert ( node.query( - "SELECT Int8_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Int8_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "-128\n" ) assert ( node.query( - "SELECT Int16_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Int16_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "-32768\n" ) assert ( node.query( - "SELECT Int32_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Int32_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "-2147483648\n" ) assert ( node.query( - "SELECT Int64_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Int64_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "-9223372036854775808\n" ) assert ( node.query( - "SELECT String_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT String_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "abacaba\n" ) assert ( node.query( - "SELECT Float32_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Float32_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "42.42\n" ) assert ( node.query( - "SELECT Float32_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Float32_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "42.42\n" ) assert ( node.query( - 
"SELECT Array_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Array_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "[['aba','caba'],['2d','array']]\n" ) assert ( node.query( - "SELECT Null_test1 FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Null_test1 FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "value\n" ) assert ( node.query( - "SELECT Null_test2 FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Null_test2 FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "NULL\n" ) assert ( node.query( - "SELECT Bool_test1 FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Bool_test1 FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "1\n" ) assert ( node.query( - "SELECT Bool_test2 FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Bool_test2 FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == "0\n" ) assert ( node.query( - "SELECT Json_test FROM MeiliSearch('http://meili1:7700', 'types_table', '')" + "SELECT Json_test FROM meilisearch('http://meili1:7700', 'types_table', '')" ) == '{"a":1,"b":{"in_json":"qwerty"}}\n' ) diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 0eb3fbf4ca7..67857437a45 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -1478,52 +1478,46 @@ def test_wrong_format_usage(started_cluster): instance = started_cluster.instances["dummy"] instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_wrong_format.native') select * from numbers(10)" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_wrong_format.native') select * from numbers(10e6)" ) + # size(test_wrong_format.native) = 10e6*8+16(header) ~= 76MiB + # ensure that not all file will be loaded into memory result = instance.query_and_get_error( - f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_wrong_format.native', 'Parquet') settings input_format_allow_seeks=0, max_memory_usage=1000" + f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_wrong_format.native', 'Parquet') settings input_format_allow_seeks=0, max_memory_usage='10Mi'" ) assert "Not a Parquet file" in result -def get_profile_event_for_query(instance, query, profile_event): +def check_profile_event_for_query(instance, query, profile_event, amount): instance.query("system flush logs") - time.sleep(0.5) query = query.replace("'", "\\'") - return int( + res = int( instance.query( - f"select ProfileEvents['{profile_event}'] from system.query_log where query='{query}' and type = 'QueryFinish' order by event_time desc limit 1" + f"select ProfileEvents['{profile_event}'] from system.query_log where query='{query}' and type = 'QueryFinish' order by query_start_time_microseconds desc limit 1" ) ) + assert res == amount + def check_cache_misses(instance, file, storage_name, started_cluster, bucket, amount=1): query = f"desc {storage_name}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file}')" - assert ( - get_profile_event_for_query(instance, query, "SchemaInferenceCacheMisses") - == amount - ) + check_profile_event_for_query(instance, query, "SchemaInferenceCacheMisses", amount) def check_cache_hits(instance, file, storage_name, started_cluster, bucket, 
amount=1): query = f"desc {storage_name}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file}')" - assert ( - get_profile_event_for_query(instance, query, "SchemaInferenceCacheHits") - == amount - ) + check_profile_event_for_query(instance, query, "SchemaInferenceCacheHits", amount) def check_cache_invalidations( instance, file, storage_name, started_cluster, bucket, amount=1 ): query = f"desc {storage_name}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file}')" - assert ( - get_profile_event_for_query( - instance, query, "SchemaInferenceCacheInvalidations" - ) - == amount + check_profile_event_for_query( + instance, query, "SchemaInferenceCacheInvalidations", amount ) @@ -1531,9 +1525,8 @@ def check_cache_evictions( instance, file, storage_name, started_cluster, bucket, amount=1 ): query = f"desc {storage_name}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file}')" - assert ( - get_profile_event_for_query(instance, query, "SchemaInferenceCacheEvictions") - == amount + check_profile_event_for_query( + instance, query, "SchemaInferenceCacheEvictions", amount ) diff --git a/tests/integration/test_transactions/test.py b/tests/integration/test_transactions/test.py index 2dfdc889856..daa4c287982 100644 --- a/tests/integration/test_transactions/test.py +++ b/tests/integration/test_transactions/test.py @@ -24,7 +24,7 @@ def tx(session, query): return node.http_query(None, data=query, params=params) -def test_rollback_unfinished_on_restart(start_cluster): +def test_rollback_unfinished_on_restart1(start_cluster): node.query( "create table mt (n int, m int) engine=MergeTree order by n partition by n % 2" ) @@ -67,14 +67,6 @@ def test_rollback_unfinished_on_restart(start_cluster): tx(1, "insert into mt values (5, 50)") tx(1, "alter table mt update m = m+n in partition id '1' where 1") - # check that uncommitted merge will be rolled back on restart - tx(2, "begin transaction") - tid4 = tx(2, "select transactionID()").strip() - tx( - 2, - "optimize table mt partition id '0' final settings optimize_throw_if_noop = 1", - ) - # check that uncommitted insert will be rolled back on restart tx(3, "begin transaction") tid5 = tx(3, "select transactionID()").strip() @@ -102,14 +94,14 @@ def test_rollback_unfinished_on_restart(start_cluster): res = res.replace(tid1, "tid1").replace("csn" + csn1 + "_", "csn_1") res = res.replace(tid2, "tid2").replace("csn" + csn2 + "_", "csn_2") res = res.replace(tid3, "tid3") - res = res.replace(tid4, "tid4") res = res.replace(tid5, "tid5") res = res.replace(tid6, "tid6").replace("csn" + csn6 + "_", "csn_6") assert ( res == "0_2_2_0\t1\ttid0\tcsn1_\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" - "0_2_4_1\t0\ttid4\tcsn18446744073709551615_\ttid0\tcsn0_\n" + "0_2_2_0_7\t0\ttid3\tcsn18446744073709551615_\ttid0\tcsn0_\n" "0_4_4_0\t1\ttid2\tcsn_2\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "0_4_4_0_7\t0\ttid3\tcsn18446744073709551615_\ttid0\tcsn0_\n" "0_8_8_0\t0\ttid5\tcsn18446744073709551615_\ttid0\tcsn0_\n" "1_1_1_0\t0\ttid0\tcsn1_\ttid1\tcsn_1\n" "1_3_3_0\t1\ttid2\tcsn_2\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" @@ -118,3 +110,85 @@ def test_rollback_unfinished_on_restart(start_cluster): "1_6_6_0\t0\ttid3\tcsn18446744073709551615_\ttid0\tcsn0_\n" "1_6_6_0_7\t0\ttid3\tcsn18446744073709551615_\ttid0\tcsn0_\n" ) + + +def test_rollback_unfinished_on_restart2(start_cluster): + node.query( + "create table mt2 (n int, m int) engine=MergeTree order by n partition 
by n % 2" + ) + node.query("insert into mt2 values (1, 10), (2, 20)") + tid0 = "(1,1,'00000000-0000-0000-0000-000000000000')" + + # it will hold a snapshot and avoid parts cleanup + tx(0, "begin transaction") + + tx(4, "begin transaction") + + tx(1, "begin transaction") + tid1 = tx(1, "select transactionID()").strip() + tx(1, "alter table mt2 drop partition id '1'") + tx(1, "commit") + + tx(1, "begin transaction") + tid2 = tx(1, "select transactionID()").strip() + tx(1, "insert into mt2 values (3, 30), (4, 40)") + tx(1, "commit") + + node.query("system flush logs") + csn1 = node.query( + "select csn from system.transactions_info_log where type='Commit' and tid={}".format( + tid1 + ) + ).strip() + csn2 = node.query( + "select csn from system.transactions_info_log where type='Commit' and tid={}".format( + tid2 + ) + ).strip() + + # check that uncommitted merge will be rolled back on restart + tx(2, "begin transaction") + tid4 = tx(2, "select transactionID()").strip() + tx( + 2, + "optimize table mt2 partition id '0' final settings optimize_throw_if_noop = 1", + ) + + # check that uncommitted insert will be rolled back on restart + tx(3, "begin transaction") + tid5 = tx(3, "select transactionID()").strip() + tx(3, "insert into mt2 values (6, 70)") + + tid6 = tx(4, "select transactionID()").strip() + tx(4, "commit") + node.query("system flush logs") + csn6 = node.query( + "select csn from system.transactions_info_log where type='Commit' and tid={}".format( + tid6 + ) + ).strip() + + node.restart_clickhouse(kill=True) + + assert ( + node.query("select *, _part from mt2 order by n") + == "2\t20\t0_2_2_0\n3\t30\t1_3_3_0\n4\t40\t0_4_4_0\n" + ) + res = node.query( + "select name, active, creation_tid, 'csn' || toString(creation_csn) || '_', removal_tid, 'csn' || toString(removal_csn) || '_' from system.parts where table='mt2' order by name" + ) + res = res.replace(tid0, "tid0") + res = res.replace(tid1, "tid1").replace("csn" + csn1 + "_", "csn_1") + res = res.replace(tid2, "tid2").replace("csn" + csn2 + "_", "csn_2") + res = res.replace(tid4, "tid4") + res = res.replace(tid5, "tid5") + res = res.replace(tid6, "tid6").replace("csn" + csn6 + "_", "csn_6") + assert ( + res + == "0_2_2_0\t1\ttid0\tcsn1_\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "0_2_4_1\t0\ttid4\tcsn18446744073709551615_\ttid0\tcsn0_\n" + "0_4_4_0\t1\ttid2\tcsn_2\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "0_5_5_0\t0\ttid5\tcsn18446744073709551615_\ttid0\tcsn0_\n" + "1_1_1_0\t0\ttid0\tcsn1_\ttid1\tcsn_1\n" + "1_3_3_0\t1\ttid2\tcsn_2\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + ) diff --git a/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py index a21186bba8d..6b27c69462a 100644 --- a/tests/integration/test_version_update_after_mutation/test.py +++ b/tests/integration/test_version_update_after_mutation/test.py @@ -10,7 +10,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.1.10.70", + tag="20.4.9.110", with_installed_binary=True, stay_alive=True, ) @@ -18,7 +18,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.1.10.70", + tag="20.4.9.110", with_installed_binary=True, stay_alive=True, ) @@ -26,7 +26,7 @@ node3 = cluster.add_instance( "node3", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.1.10.70", + tag="20.4.9.110", with_installed_binary=True, stay_alive=True, ) diff --git 
a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/main.clj b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/main.clj index 0384d4d583a..cd1aa540e45 100644 --- a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/main.clj +++ b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/main.clj @@ -79,8 +79,7 @@ :default ""] [nil, "--bench-opts STR" "Run perf-test mode" :default "--generator list_medium_nodes -c 30 -i 1000"] - ["-c" "--clickhouse-source URL" "URL for clickhouse deb or tgz package" - :default "https://clickhouse-builds.s3.yandex.net/21677/ef82333089156907a0979669d9374c2e18daabe5/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/clickhouse-common-static_21.4.1.6313_amd64.deb"] + ["-c" "--clickhouse-source URL" "URL for clickhouse deb or tgz package"] [nil "--bench-path path" "Path to keeper-bench util" :default "/home/alesap/code/cpp/BuildCH/utils/keeper-bench/keeper-bench"]]) diff --git a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/utils.clj b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/utils.clj index 596458a8f3e..3625b24b4f9 100644 --- a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/utils.clj +++ b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/utils.clj @@ -196,13 +196,14 @@ [url] (let [encoded-url (md5 url) expected-file-name (.getName (io/file url)) - dest-file (str binaries-cache-dir "/" encoded-url) + dest-folder (str binaries-cache-dir "/" encoded-url) + dest-file (str dest-folder "/clickhouse") dest-symlink (str common-prefix "/" expected-file-name) wget-opts (concat cu/std-wget-opts [:-O dest-file])] (when-not (cu/exists? dest-file) (info "Downloading" url) - (do (c/exec :mkdir :-p binaries-cache-dir) - (c/cd binaries-cache-dir + (do (c/exec :mkdir :-p dest-folder) + (c/cd dest-folder (cu/wget-helper! 
wget-opts url)))) (c/exec :rm :-rf dest-symlink) (c/exec :ln :-s dest-file dest-symlink) diff --git a/tests/queries/0_stateless/00900_long_parquet.reference b/tests/queries/0_stateless/00900_long_parquet.reference index bbdad7243bd..4dfc726145e 100644 --- a/tests/queries/0_stateless/00900_long_parquet.reference +++ b/tests/queries/0_stateless/00900_long_parquet.reference @@ -44,12 +44,12 @@ converted: diff: dest: 79 81 82 83 84 85 86 87 88 89 str01\0\0\0\0\0\0\0\0\0\0 fstr1\0\0\0\0\0\0\0\0\0\0 2003-03-04 2004-05-06 00:00:00 2004-05-06 07:08:09.012000000 -80 81 82 83 84 85 86 87 88 89 str02 fstr2\0\0\0\0\0\0\0\0\0\0 2005-03-04 2006-08-09 10:11:12 2006-08-09 10:11:12.345000000 +80 81 82 83 84 85 86 87 88 89 str02 fstr2\0\0\0\0\0\0\0\0\0\0 2149-06-06 2006-08-09 10:11:12 2006-08-09 10:11:12.345000000 min: --128 0 0 0 0 0 0 0 -1 -1 string-1\0\0\0\0\0\0\0 fixedstring-1\0\0 2003-04-05 2003-02-03 2003-02-03 04:05:06.789000000 --108 108 8 92 -8 108 -40 -116 -1 -1 string-0\0\0\0\0\0\0\0 fixedstring\0\0\0\0 2001-02-03 2002-02-03 2002-02-03 04:05:06.789000000 +-128 0 0 0 0 0 0 0 -1 -1 string-1\0\0\0\0\0\0\0 fixedstring-1\0\0 2003-04-05 2149-06-06 2003-02-03 04:05:06.789000000 +-108 108 8 92 -8 108 -40 -116 -1 -1 string-0\0\0\0\0\0\0\0 fixedstring\0\0\0\0 2001-02-03 2149-06-06 2002-02-03 04:05:06.789000000 79 81 82 83 84 85 86 87 88 89 str01\0\0\0\0\0\0\0\0\0\0 fstr1\0\0\0\0\0\0\0\0\0\0 2003-03-04 2004-05-06 2004-05-06 07:08:09.012000000 -127 -1 -1 -1 -1 -1 -1 -1 -1 -1 string-2\0\0\0\0\0\0\0 fixedstring-2\0\0 2004-06-07 2004-02-03 2004-02-03 04:05:06.789000000 +127 -1 -1 -1 -1 -1 -1 -1 -1 -1 string-2\0\0\0\0\0\0\0 fixedstring-2\0\0 2004-06-07 2149-06-06 2004-02-03 04:05:06.789000000 max: -128 0 -32768 0 -2147483648 0 -9223372036854775808 0 -1 -1 string-1 fixedstring-1\0\0 2003-04-05 00:00:00 2003-02-03 04:05:06 2003-02-03 04:05:06.789000000 -108 108 -1016 1116 -1032 1132 -1064 1164 -1 -1 string-0 fixedstring\0\0\0\0 2001-02-03 00:00:00 2002-02-03 04:05:06 2002-02-03 04:05:06.789000000 diff --git a/tests/queries/0_stateless/00941_to_custom_week.sql b/tests/queries/0_stateless/00941_to_custom_week.sql index 4dd5d209306..04ff08d4117 100644 --- a/tests/queries/0_stateless/00941_to_custom_week.sql +++ b/tests/queries/0_stateless/00941_to_custom_week.sql @@ -49,4 +49,3 @@ SELECT toStartOfWeek(x, 3) AS w3, toStartOfWeek(x_t, 3) AS wt3 FROM numbers(10); - diff --git a/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled b/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled deleted file mode 100755 index 0b6ac65e77c..00000000000 --- a/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python3 -# Tags: disabled, no-replicated-database, no-parallel, no-fasttest - -import os -import sys -import signal - -CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) - -from client import client, prompt, end_of_block - -log = None -# uncomment the line below for debugging -#log=sys.stdout - -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: - client1.expect(prompt) - client2.expect(prompt) - - client1.send('SET allow_experimental_live_view = 1') - client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') - client2.expect(prompt) - - client1.send('DROP TABLE IF EXISTS test.lv') - client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.mt') - client1.expect(prompt) 
- client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') - client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv WITH TIMEOUT 1 AS SELECT sum(a) FROM test.mt') - client1.expect(prompt) - client1.send('WATCH test.lv') - client1.expect('_version') - client1.expect(r'0.*1' + end_of_block) - client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') - client2.expect(prompt) - client1.expect(r'6.*2' + end_of_block) - client2.send('INSERT INTO test.mt VALUES (4),(5),(6)') - client2.expect(prompt) - client1.expect(r'21.*3' + end_of_block) - # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) - if match.groups()[1]: - client1.send(client1.command) - client1.expect(prompt) - client1.send('SELECT sleep(1)') - client1.expect(prompt) - client1.send('DROP TABLE test.lv') - client1.expect('Table test.lv doesn\'t exist') - client1.expect(prompt) - client1.send('DROP TABLE test.mt') - client1.expect(prompt) diff --git a/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py.disabled b/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py.disabled deleted file mode 100755 index f545186d262..00000000000 --- a/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py.disabled +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 -# Tags: disabled, no-replicated-database, no-parallel, no-fasttest - -import os -import sys -import signal - -CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) - -from client import client, prompt, end_of_block - -log = None -# uncomment the line below for debugging -#log=sys.stdout - -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: - client1.expect(prompt) - client2.expect(prompt) - - client1.send('SET allow_experimental_live_view = 1') - client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') - client2.expect(prompt) - - client1.send('DROP TABLE IF EXISTS test.lv') - client1.expect(prompt) - client1.send(' DROP TABLE IF EXISTS test.mt') - client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32, id Int32) Engine=Memory') - client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a)/2 FROM (SELECT a, id FROM ( SELECT a, id FROM test.mt ORDER BY id DESC LIMIT 2 ) ORDER BY id DESC LIMIT 2)') - client1.expect(prompt) - client1.send('WATCH test.lv') - client1.expect('_version') - client1.expect(r'0.*1' + end_of_block) - client2.send('INSERT INTO test.mt VALUES (1, 1),(2, 2),(3, 3)') - client1.expect(r'2\.5.*2' + end_of_block) - client2.expect(prompt) - client2.send('INSERT INTO test.mt VALUES (4, 4),(5, 5),(6, 6)') - client1.expect(r'5\.5.*3' + end_of_block) - client2.expect(prompt) - for v, i in enumerate(range(7,129)): - client2.send('INSERT INTO test.mt VALUES (%d, %d)' % (i, i)) - client1.expect(r'%.1f.*%d' % (i-0.5, 4+v) + end_of_block) - client2.expect(prompt) - # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) - if match.groups()[1]: - client1.send(client1.command) - client1.expect(prompt) - client1.send('DROP TABLE test.lv') - client1.expect(prompt) - client1.send('DROP TABLE test.mt') - client1.expect(prompt) diff --git a/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled b/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled deleted file mode 100755 index 92cc5b10e6b..00000000000 --- 
a/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Tags: disabled, no-replicated-database, no-parallel, no-fasttest - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh - -python3 $CURDIR/00991_live_view_watch_event_live.python diff --git a/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled b/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled deleted file mode 100755 index abb1a94371d..00000000000 --- a/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Tags: disabled, no-replicated-database, no-parallel, no-fasttest - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh - -python3 $CURDIR/00991_live_view_watch_http.python diff --git a/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled b/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled deleted file mode 100755 index 97c6c92b999..00000000000 --- a/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Tags: disabled, no-replicated-database, no-parallel, no-fasttest - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh - -python3 $CURDIR/00991_temporary_live_view_watch_events_heartbeat.python diff --git a/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled b/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled deleted file mode 100755 index 2e784223186..00000000000 --- a/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Tags: disabled, no-replicated-database, no-parallel, no-fasttest - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. 
$CURDIR/../shell_config.sh - -python3 $CURDIR/00991_temporary_live_view_watch_live.python diff --git a/tests/queries/0_stateless/01030_storage_url_syntax.sql b/tests/queries/0_stateless/01030_storage_url_syntax.sql index e855383f0ac..9b31558eece 100644 --- a/tests/queries/0_stateless/01030_storage_url_syntax.sql +++ b/tests/queries/0_stateless/01030_storage_url_syntax.sql @@ -6,3 +6,61 @@ create table test_table_url_syntax (id UInt32) ENGINE = URL('','','','') ; -- { serverError 42 } drop table if exists test_table_url_syntax ; + +drop table if exists test_table_url +; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint') +; -- { serverError 36 } + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint.json'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'ErrorFormat') +; -- { serverError 73 } + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'gzip'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'gz'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'deflate'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'brotli'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'lzma'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'zstd'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'lz4'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'bz2'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'snappy'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'none'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'auto'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint.gz', 'JSONEachRow'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint.fr', 'JSONEachRow'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'zip') +; -- { serverError 48 } + diff --git a/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference index e69de29bb2d..8984d35930a 100644 --- a/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference +++ b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference @@ -0,0 +1,7 @@ +Instruction check fail. The CPU does not support SSSE3 instruction set. +Instruction check fail. The CPU does not support SSE4.1 instruction set. +Instruction check fail. The CPU does not support SSE4.2 instruction set. 
+Instruction check fail. The CPU does not support POPCNT instruction set. +: MADV_DONTNEED does not work (memset will be used instead) +: (This is the expected behaviour if you are running under QEMU) +1 diff --git a/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh index 9fb239e87b2..9b6e1e05f2d 100755 --- a/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh +++ b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh @@ -2,18 +2,6 @@ # Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug, no-fasttest, no-cpu-aarch64 # Tag no-fasttest: avoid dependency on qemu -- inconvenient when running locally -# More than a decade after AVX was released, AVX is still not supported by QEMU, even if "-cpu help" pretends to. As a result, we cannot use -# QEMU to verify that a ClickHouse binary compiled for a SIMD level up to AVX runs on a system with a SIMD level up to AVX. The alternative -# is to disassemble the binary and grep for unwanted instructions (e.g. AVX512) which is just too fragile ... -# -# https://gitlab.com/qemu-project/qemu/-/issues/164 -# https://www.mail-archive.com/qemu-devel@nongnu.org/msg713932.html -# https://lore.kernel.org/all/CAObpvQmejWBh+RNz2vhk16-kcY_QveM_pSmM5ZeWqWv1d8AJzQ@mail.gmail.com/T/ - -exit 0 - -# keeping the original test because it is instructive and maybe QEMU will be fixed at some point ... - CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh index addf503e44a..992c655c760 100755 --- a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh +++ b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh @@ -51,7 +51,7 @@ function create_table() database=$($CLICKHOUSE_CLIENT -q "select name from system.databases where name like '${CLICKHOUSE_DATABASE}%' order by rand() limit 1") if [ -z "$database" ]; then return; fi $CLICKHOUSE_CLIENT --distributed_ddl_task_timeout=0 -q \ - "create table $database.rmt_$RANDOM (n int) engine=ReplicatedMergeTree order by tuple() -- suppress $CLICKHOUSE_TEST_ZOOKEEPER_PREFIX" \ + "create table $database.rmt_${RANDOM}_${RANDOM}_${RANDOM} (n int) engine=ReplicatedMergeTree order by tuple() -- suppress $CLICKHOUSE_TEST_ZOOKEEPER_PREFIX" \ 2>&1| grep -Fa "Exception: " | grep -Fv "Macro 'uuid' and empty arguments" | grep -Fv "Cannot enqueue query" | grep -Fv "ZooKeeper session expired" | grep -Fv UNKNOWN_DATABASE sleep 0.$RANDOM done diff --git a/tests/queries/0_stateless/01154_move_partition_long.sh b/tests/queries/0_stateless/01154_move_partition_long.sh index a8f12d6afbd..c68b0944407 100755 --- a/tests/queries/0_stateless/01154_move_partition_long.sh +++ b/tests/queries/0_stateless/01154_move_partition_long.sh @@ -123,7 +123,7 @@ timeout $TIMEOUT bash -c drop_part_thread & wait check_replication_consistency "dst_" "count(), sum(p), sum(k), sum(v)" -try_sync_replicas "src_" +try_sync_replicas "src_" 300 for ((i=0; i<16; i++)) do $CLICKHOUSE_CLIENT -q "DROP TABLE dst_$i" 2>&1| grep -Fv "is already started to be removing" & diff --git a/tests/queries/0_stateless/01221_system_settings.reference b/tests/queries/0_stateless/01221_system_settings.reference index 108d48d6051..a90b3eac6dc 100644 --- a/tests/queries/0_stateless/01221_system_settings.reference +++ 
b/tests/queries/0_stateless/01221_system_settings.reference @@ -1,4 +1,4 @@ -send_timeout 300 0 \N \N 0 Seconds +send_timeout 300 0 Timeout for sending data to network, in seconds. If client needs to sent some data, but it did not able to send any bytes in this interval, exception is thrown. If you set this setting on client, the \'receive_timeout\' for the socket will be also set on the corresponding connection end on the server. \N \N 0 Seconds storage_policy default 0 Name of storage disk policy String 1 1 diff --git a/tests/queries/0_stateless/01440_to_date_monotonicity.reference b/tests/queries/0_stateless/01440_to_date_monotonicity.reference index 2dbec540fbb..dd8545b721d 100644 --- a/tests/queries/0_stateless/01440_to_date_monotonicity.reference +++ b/tests/queries/0_stateless/01440_to_date_monotonicity.reference @@ -1,4 +1,4 @@ 0 -1970-01-01 2106-02-07 1970-04-11 1970-01-01 2149-06-06 +1970-01-01 2120-07-26 1970-04-11 1970-01-01 2149-06-06 1970-01-01 02:00:00 2106-02-07 09:28:15 1970-01-01 02:16:40 2000-01-01 13:12:12 diff --git a/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference b/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference index e491dd9e091..dcee18b33e0 100644 --- a/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference +++ b/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference @@ -12,3 +12,5 @@ blocks failed_parts last_part parallel +shared +shared diff --git a/tests/queries/0_stateless/01705_normalize_case_insensitive_function_names.reference b/tests/queries/0_stateless/01705_normalize_case_insensitive_function_names.reference index 5b0f7bdeb2d..682652152dc 100644 --- a/tests/queries/0_stateless/01705_normalize_case_insensitive_function_names.reference +++ b/tests/queries/0_stateless/01705_normalize_case_insensitive_function_names.reference @@ -3,8 +3,8 @@ SELECT ceil(1), ceil(1), char(49), - CHAR_LENGTH('1'), - CHARACTER_LENGTH('1'), + lengthUTF8('1'), + lengthUTF8('1'), coalesce(1), concat('1', '1'), corr(1, 1), @@ -12,7 +12,7 @@ SELECT count(), covarPop(1, 1), covarSamp(1, 1), - DATABASE(), + currentDatabase(), dateDiff('DAY', toDate('2020-10-24'), toDate('2019-10-24')), exp(1), arrayFlatten([[1]]), diff --git a/tests/queries/0_stateless/01710_projection_with_column_transformers.reference b/tests/queries/0_stateless/01710_projection_with_column_transformers.reference new file mode 100644 index 00000000000..8410d58406e --- /dev/null +++ b/tests/queries/0_stateless/01710_projection_with_column_transformers.reference @@ -0,0 +1 @@ +CREATE TABLE default.foo\n(\n `bar` String,\n PROJECTION p\n (\n SELECT * APPLY groupUniqArray(100)\n )\n)\nENGINE = MergeTree\nORDER BY bar\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01710_projection_with_column_transformers.sql b/tests/queries/0_stateless/01710_projection_with_column_transformers.sql new file mode 100644 index 00000000000..90d92f68fd1 --- /dev/null +++ b/tests/queries/0_stateless/01710_projection_with_column_transformers.sql @@ -0,0 +1,11 @@ +drop table if exists foo; + +create table foo(bar String, projection p (select * apply groupUniqArray(100))) engine MergeTree order by bar; + +show create foo; + +detach table foo; + +attach table foo; + +drop table foo; diff --git a/tests/queries/0_stateless/01750_parsing_exception.sh b/tests/queries/0_stateless/01750_parsing_exception.sh index cd50d769f6d..103e551b626 100755 --- a/tests/queries/0_stateless/01750_parsing_exception.sh +++ b/tests/queries/0_stateless/01750_parsing_exception.sh @@ 
-5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh # if it will not match, the exit code of grep will be non-zero and the test will fail -$CLICKHOUSE_CLIENT -q "SELECT toDateTime(format('{}-{}-01 00:00:00', '2021', '1'))" |& grep -F -q 'Cannot parse datetime 2021-1-01 00:00:00: Cannot parse DateTime from String:' +$CLICKHOUSE_CLIENT -q "SELECT toDateTime(format('{}-{}-01 00:00:00', '2021', '1'))" |& grep -F -q "Cannot parse string '2021-1-01 00:00:00' as DateTime" diff --git a/tests/queries/0_stateless/01889_sqlite_read_write.reference b/tests/queries/0_stateless/01889_sqlite_read_write.reference index e979b5816c5..d6b1e6f41c1 100644 --- a/tests/queries/0_stateless/01889_sqlite_read_write.reference +++ b/tests/queries/0_stateless/01889_sqlite_read_write.reference @@ -21,7 +21,7 @@ line3 3 2 text2 3 text3 test types -CREATE TABLE SQLite.table4\n(\n `a` Nullable(Int32),\n `b` Nullable(Int32),\n `c` Nullable(Int8),\n `d` Nullable(Int16),\n `e` Nullable(Int32),\n `bigint` Nullable(String),\n `int2` Nullable(String),\n `int8` Nullable(String)\n)\nENGINE = SQLite +CREATE TABLE SQLite.table4\n(\n `a` Nullable(Int32),\n `b` Nullable(Int32),\n `c` Nullable(Int8),\n `d` Nullable(Int16),\n `e` Nullable(Int32),\n `f` Nullable(Int64),\n `g` Nullable(Int32),\n `h` Nullable(Int64)\n)\nENGINE = SQLite CREATE TABLE SQLite.table5\n(\n `a` Nullable(String),\n `b` Nullable(String),\n `c` Nullable(Float64),\n `d` Nullable(Float64),\n `e` Nullable(Float64),\n `f` Nullable(Float32)\n)\nENGINE = SQLite create table engine with table3 CREATE TABLE default.sqlite_table3\n(\n `col1` String,\n `col2` Int32\n)\nENGINE = SQLite diff --git a/tests/queries/0_stateless/01889_sqlite_read_write.sh b/tests/queries/0_stateless/01889_sqlite_read_write.sh index fc87aa08fa7..e732155dbcc 100755 --- a/tests/queries/0_stateless/01889_sqlite_read_write.sh +++ b/tests/queries/0_stateless/01889_sqlite_read_write.sh @@ -42,7 +42,7 @@ sqlite3 "${DB_PATH}" "INSERT INTO table3 VALUES ('not a null', 2)" sqlite3 "${DB_PATH}" 'INSERT INTO table3 VALUES (NULL, 3)' sqlite3 "${DB_PATH}" "INSERT INTO table3 VALUES ('', 4)" -sqlite3 "${DB_PATH}" 'CREATE TABLE table4 (a int, b integer, c tinyint, d smallint, e mediumint, bigint, int2, int8)' +sqlite3 "${DB_PATH}" 'CREATE TABLE table4 (a int, b integer, c tinyint, d smallint, e mediumint, f bigint, g int2, h int8)' sqlite3 "${DB_PATH}" 'CREATE TABLE table5 (a character(20), b varchar(10), c real, d double, e double precision, f float)' diff --git a/tests/queries/0_stateless/01921_datatype_date32.reference b/tests/queries/0_stateless/01921_datatype_date32.reference index acb0cc4ca59..a33a96ffffb 100644 --- a/tests/queries/0_stateless/01921_datatype_date32.reference +++ b/tests/queries/0_stateless/01921_datatype_date32.reference @@ -43,16 +43,16 @@ -------toMinute--------- -------toSecond--------- -------toStartOfDay--------- -2036-02-07 07:31:20 -2036-02-07 07:31:20 -2027-10-01 11:03:28 -2027-10-17 11:03:28 +1970-01-01 02:00:00 +1970-01-01 02:00:00 +2106-02-07 00:00:00 +2106-02-07 00:00:00 2021-06-22 00:00:00 -------toMonday--------- -2079-06-07 -2079-06-07 -2120-07-06 -2120-07-20 +1970-01-01 +1970-01-01 +2149-06-02 +2149-06-02 2021-06-21 -------toISOWeek--------- 1 @@ -79,28 +79,28 @@ 229953 202125 -------toStartOfWeek--------- -2079-06-06 -2079-06-06 -2120-07-05 -2120-07-26 +1970-01-01 +1970-01-01 +2149-06-01 +2149-06-01 2021-06-20 -------toStartOfMonth--------- -2079-06-07 -2079-06-07 -2120-06-26 -2120-06-26 +1970-01-01 +1970-01-01 +2149-06-01 
+2149-06-01 2021-06-01 -------toStartOfQuarter--------- -2079-06-07 -2079-06-07 -2120-04-26 -2120-04-26 +1970-01-01 +1970-01-01 +2149-04-01 +2149-04-01 2021-04-01 -------toStartOfYear--------- -2079-06-07 -2079-06-07 -2119-07-28 -2119-07-28 +1970-01-01 +1970-01-01 +2149-01-01 +2149-01-01 2021-01-01 -------toStartOfSecond--------- -------toStartOfMinute--------- diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 b/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 index 53d970496b2..dee13a83b4c 100644 --- a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 +++ b/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 @@ -12,7 +12,7 @@ as select number, repeat(toString(number), 5) from numbers({{ rows_in_table }}); -- avoid any optimizations with ignore(*) select * apply max from data_02052_{{ rows_in_table }}_wide{{ wide }} settings max_read_buffer_size=1, max_threads=1; -select * apply max from data_02052_{{ rows_in_table }}_wide{{ wide }} settings max_read_buffer_size=0, max_threads=1; -- { serverError CANNOT_READ_ALL_DATA } +select * apply max from data_02052_{{ rows_in_table }}_wide{{ wide }} settings max_read_buffer_size=0, max_threads=1; -- { serverError INVALID_SETTING_VALUE } drop table data_02052_{{ rows_in_table }}_wide{{ wide }}; {% endfor %} diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 9edc2aa0cb4..9e2f676bb55 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -186,6 +186,7 @@ CREATE TABLE system.disks `total_space` UInt64, `keep_free_space` UInt64, `type` String, + `is_encrypted` UInt8, `cache_path` String ) ENGINE = SystemDisks @@ -269,7 +270,8 @@ CREATE TABLE system.functions `case_insensitive` UInt8, `alias_to` String, `create_query` String, - `origin` Enum8('System' = 0, 'SQLUserDefined' = 1, 'ExecutableUserDefined' = 2) + `origin` Enum8('System' = 0, 'SQLUserDefined' = 1, 'ExecutableUserDefined' = 2), + `description` String ) ENGINE = SystemFunctions COMMENT 'SYSTEM TABLE is built on the fly.' @@ -1021,7 +1023,8 @@ ENGINE = SystemTableEngines COMMENT 'SYSTEM TABLE is built on the fly.' CREATE TABLE system.table_functions ( - `name` String + `name` String, + `description` String ) ENGINE = SystemTableFunctions COMMENT 'SYSTEM TABLE is built on the fly.' 
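The `description` columns just added to `system.functions` and `system.table_functions` make documentation coverage auditable in plain SQL, in the same spirit as the "every setting/metric/event must have documentation" tests added further below. A minimal sketch of such an audit query, assuming only the new column (illustrative only, not part of the patch):

SELECT name
FROM system.table_functions
WHERE length(description) < 10
ORDER BY name;

An empty result set means every table function ships with at least a short description.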
diff --git a/tests/queries/0_stateless/02177_issue_31009.sql b/tests/queries/0_stateless/02177_issue_31009.sql index ab4aec60ce4..280627954d9 100644 --- a/tests/queries/0_stateless/02177_issue_31009.sql +++ b/tests/queries/0_stateless/02177_issue_31009.sql @@ -1,4 +1,4 @@ --- Tags: long +-- Tags: long, no-tsan, no-asan, no-msan, no-debug SET max_threads=0; diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference index c53187a963c..bd0c9cee464 100644 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference @@ -60,7 +60,15 @@ quorum quorum replicas replicas +shared +shared +shared +shared table_shared_id table_shared_id temp temp +zero_copy_hdfs +zero_copy_hdfs +zero_copy_s3 +zero_copy_s3 diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference index 0ac6e838903..f95d60dc07b 100644 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference @@ -29,8 +29,12 @@ pinned_part_uuids queue quorum replicas +shared +shared table_shared_id temp +zero_copy_hdfs +zero_copy_s3 ------------------------- 1 alter_partition_version @@ -63,5 +67,9 @@ pinned_part_uuids queue quorum replicas +shared +shared table_shared_id temp +zero_copy_hdfs +zero_copy_s3 diff --git a/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.reference b/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.reference index 5b37f1adb02..4a10ff02586 100644 --- a/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.reference +++ b/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.reference @@ -11,12 +11,12 @@ SYSTEM STOP MERGES test; INSERT INTO test SELECT number, toString(number) FROM numbers(100000); SELECT 2240, 's3_cache', * FROM test FORMAT Null; SYSTEM FLUSH LOGS; -SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2240%s3_cache%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1); +SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2240%s3_cache%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1) ORDER BY file_segment_range, read_type; (0,519) READ_FROM_FS_AND_DOWNLOADED_TO_CACHE (0,808110) READ_FROM_FS_AND_DOWNLOADED_TO_CACHE SELECT 2241, 's3_cache', * FROM test FORMAT Null; SYSTEM FLUSH LOGS; -SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2241%s3_cache%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1); +SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2241%s3_cache%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1) ORDER BY file_segment_range, read_type; (0,808110) READ_FROM_CACHE Using storage policy: local_cache @@ -32,11 +32,11 @@ SYSTEM STOP MERGES test; 
INSERT INTO test SELECT number, toString(number) FROM numbers(100000); SELECT 2240, 'local_cache', * FROM test FORMAT Null; SYSTEM FLUSH LOGS; -SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2240%local_cache%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1); +SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2240%local_cache%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1) ORDER BY file_segment_range, read_type; (0,519) READ_FROM_FS_AND_DOWNLOADED_TO_CACHE (0,808110) READ_FROM_FS_AND_DOWNLOADED_TO_CACHE SELECT 2241, 'local_cache', * FROM test FORMAT Null; SYSTEM FLUSH LOGS; -SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2241%local_cache%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1); +SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2241%local_cache%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1) ORDER BY file_segment_range, read_type; (0,808110) READ_FROM_CACHE diff --git a/tests/queries/0_stateless/02293_part_log_has_merge_reason.sh b/tests/queries/0_stateless/02293_part_log_has_merge_reason.sh new file mode 100755 index 00000000000..1a33e6db459 --- /dev/null +++ b/tests/queries/0_stateless/02293_part_log_has_merge_reason.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +set -euo pipefail + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q 'DROP TABLE IF EXISTS t_part_log_has_merge_type_table' + +${CLICKHOUSE_CLIENT} -q ' + CREATE TABLE t_part_log_has_merge_type_table + ( + event_time DateTime, + UserID UInt64, + Comment String + ) + ENGINE = MergeTree() + ORDER BY tuple() + TTL event_time + INTERVAL 3 MONTH + SETTINGS min_bytes_for_wide_part = 0, materialize_ttl_recalculate_only = true, max_number_of_merges_with_ttl_in_pool = 100 +' + +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_part_log_has_merge_type_table VALUES (now(), 1, 'username1');" +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_part_log_has_merge_type_table VALUES (now() - INTERVAL 4 MONTH, 2, 'username2');" + +function get_parts_count() { + table_name=$1 + ${CLICKHOUSE_CLIENT} -q ' + SELECT + count(*) + FROM + system.parts + WHERE + table = '"'${table_name}'"' + AND + active = 1 + AND + database = '"'${CLICKHOUSE_DATABASE}'"' + ' +} + +function wait_table_parts_are_merged_into_one_part() { + table_name=$1 + + while true + do + count=$(get_parts_count $table_name) + if [[ count -gt 1 ]] + then + sleep 1 + else + break + fi + done +} + +export -f get_parts_count +export -f wait_table_parts_are_merged_into_one_part + +timeout 30 bash -c 'wait_table_parts_are_merged_into_one_part t_part_log_has_merge_type_table' + +${CLICKHOUSE_CLIENT} -q 'SYSTEM FLUSH LOGS' + +${CLICKHOUSE_CLIENT} -q ' + SELECT + event_type, + merge_reason + FROM + system.part_log + WHERE + event_type = '"'MergeParts'"' + AND + table = '"'t_part_log_has_merge_type_table'"' + AND + database = '"'${CLICKHOUSE_DATABASE}'"' + ORDER BY event_type, merge_reason +' diff --git a/tests/queries/0_stateless/02293_part_log_has_merge_reason.sql b/tests/queries/0_stateless/02293_part_log_has_merge_reason.sql deleted file mode 100644 index 9d2575314d4..00000000000 --- a/tests/queries/0_stateless/02293_part_log_has_merge_reason.sql +++ /dev/null @@ -1,33 +0,0 @@ -DROP TABLE IF EXISTS t_part_log_has_merge_type_table; - -CREATE TABLE t_part_log_has_merge_type_table -( - event_time DateTime, - UserID UInt64, - Comment String -) -ENGINE = MergeTree() -ORDER BY tuple() -TTL event_time + INTERVAL 3 MONTH -SETTINGS min_bytes_for_wide_part = 0, materialize_ttl_recalculate_only = true, max_number_of_merges_with_ttl_in_pool = 100; - -INSERT INTO t_part_log_has_merge_type_table VALUES (now(), 1, 'username1'); -INSERT INTO t_part_log_has_merge_type_table VALUES (now() - INTERVAL 4 MONTH, 2, 'username2'); - -OPTIMIZE TABLE t_part_log_has_merge_type_table FINAL; - -SYSTEM FLUSH LOGS; - -SELECT - event_type, - merge_reason -FROM - system.part_log -WHERE - table = 't_part_log_has_merge_type_table' - AND - merge_reason = 'TTLDeleteMerge' - AND - database = currentDatabase(); - -DROP TABLE t_part_log_has_merge_type_table; diff --git a/tests/queries/0_stateless/02314_csv_tsv_skip_first_lines.sql b/tests/queries/0_stateless/02314_csv_tsv_skip_first_lines.sql index ff913a2a3ca..4a0cef35310 100644 --- a/tests/queries/0_stateless/02314_csv_tsv_skip_first_lines.sql +++ b/tests/queries/0_stateless/02314_csv_tsv_skip_first_lines.sql @@ -7,6 +7,6 @@ select * from file(data_02314.csv) settings input_format_csv_skip_first_lines=5; insert into function file(data_02314.tsv) select number, number + 1 from numbers(5) settings engine_file_truncate_on_insert=1; insert into function file(data_02314.tsv) select number, number + 1, number + 2 from numbers(5); -desc file(data_02314.tsv) settings input_format_csv_skip_first_lines=5; -select * from file(data_02314.tsv) settings 
input_format_csv_skip_first_lines=5; +desc file(data_02314.tsv) settings input_format_tsv_skip_first_lines=5; +select * from file(data_02314.tsv) settings input_format_tsv_skip_first_lines=5; diff --git a/tests/queries/0_stateless/02318_template_schema_inference_bug.sql b/tests/queries/0_stateless/02318_template_schema_inference_bug.sql index 42646013dd5..fe1c11943d6 100644 --- a/tests/queries/0_stateless/02318_template_schema_inference_bug.sql +++ b/tests/queries/0_stateless/02318_template_schema_inference_bug.sql @@ -1,2 +1,2 @@ insert into function file(data_02318.tsv) select * from numbers(10); -desc file('data_02318.tsv', 'Template') SETTINGS format_template_row='nonexist', format_template_resultset='nonexist'; -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} +desc file('data_02318.tsv', 'Template') SETTINGS format_template_row='nonexist', format_template_resultset='nonexist'; -- {serverError FILE_DOESNT_EXIST} diff --git a/tests/queries/0_stateless/02337_base58.reference b/tests/queries/0_stateless/02337_base58.reference index bc666044388..20b9124c150 100644 --- a/tests/queries/0_stateless/02337_base58.reference +++ b/tests/queries/0_stateless/02337_base58.reference @@ -21,3 +21,6 @@ foo foob fooba foobar + +1 +1 diff --git a/tests/queries/0_stateless/02337_base58.sql b/tests/queries/0_stateless/02337_base58.sql index 9c9379a2854..42b032c7601 100644 --- a/tests/queries/0_stateless/02337_base58.sql +++ b/tests/queries/0_stateless/02337_base58.sql @@ -9,4 +9,7 @@ SELECT base58Decode('Hold my beer...'); -- { serverError 36 } SELECT base58Decode(encoded) FROM (SELECT base58Encode(val) as encoded FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar', 'Hello world!']) val)); SELECT base58Encode(val) FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar']) val); -SELECT base58Decode(val) FROM (select arrayJoin(['', '2m', '8o8', 'bQbp', '3csAg9', 'CZJRhmz', 't1Zv2yaZ']) val); +SELECT base58Decode(val) FROM (select arrayJoin(['', '2m', '8o8', 'bQbp', '3csAg9', 'CZJRhmz', 't1Zv2yaZ', '']) val); + +SELECT base58Encode(base58Decode('1BWutmTvYPwDtmw9abTkS4Ssr8no61spGAvW1X6NDix')) == '1BWutmTvYPwDtmw9abTkS4Ssr8no61spGAvW1X6NDix'; +select base58Encode('\x00\x0b\xe3\xe1\xeb\xa1\x7a\x47\x3f\x89\xb0\xf7\xe8\xe2\x49\x40\xf2\x0a\xeb\x8e\xbc\xa7\x1a\x88\xfd\xe9\x5d\x4b\x83\xb7\x1a\x09') == '1BWutmTvYPwDtmw9abTkS4Ssr8no61spGAvW1X6NDix'; diff --git a/tests/queries/0_stateless/02353_format_settings.reference b/tests/queries/0_stateless/02353_format_settings.reference new file mode 100644 index 00000000000..8bd61bbd7cd --- /dev/null +++ b/tests/queries/0_stateless/02353_format_settings.reference @@ -0,0 +1,17 @@ +SELECT 1 +FORMAT CSV +SETTINGS max_execution_time = 0.001 +SELECT 1 +SETTINGS max_execution_time = 0.001 +FORMAT CSV +SELECT 1 +UNION ALL +SELECT 2 +FORMAT CSV +SETTINGS max_execution_time = 0.001 +SELECT 1 +SETTINGS max_threads = 1 +UNION ALL +SELECT 2 +SETTINGS max_execution_time = 2 +FORMAT `Null` diff --git a/tests/queries/0_stateless/02353_format_settings.sh b/tests/queries/0_stateless/02353_format_settings.sh new file mode 100755 index 00000000000..6d65f143633 --- /dev/null +++ b/tests/queries/0_stateless/02353_format_settings.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +set -e + +format="$CLICKHOUSE_FORMAT" + +echo "select 1 format CSV settings max_execution_time = 0.001" | $format +echo "select 1 settings max_execution_time = 0.001 format CSV" | $format +echo "select 1 UNION ALL Select 2 format CSV settings max_execution_time = 0.001" | $format + +# I don't think having multiple settings makes sense, but it's supported so test that it still works +echo "select 1 settings max_threads=1 UNION ALL select 2 settings max_execution_time=2 format Null" | $format diff --git a/tests/queries/0_stateless/02382_filesystem_cache_persistent_files.reference b/tests/queries/0_stateless/02382_filesystem_cache_persistent_files.reference index d3d9ce25569..7f79a172f4b 100644 --- a/tests/queries/0_stateless/02382_filesystem_cache_persistent_files.reference +++ b/tests/queries/0_stateless/02382_filesystem_cache_persistent_files.reference @@ -9,7 +9,7 @@ INSERT INTO nopers SELECT number, toString(number) FROM numbers(10); SELECT * FROM nopers FORMAT Null; SELECT sum(size) FROM system.filesystem_cache; 194 -SELECT substring(local_path, 58, 10) as file, substring(cache_path, 53, 20) as cache, size +SELECT extract(local_path, '.*/([\w.]+)') as file, extract(cache_path, '.*/([\w.]+)') as cache, size FROM ( SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path @@ -19,8 +19,8 @@ INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path ORDER BY file, cache, size; -data.bin 114 -data.mrk3 80 +data.bin 0 114 +data.mrk3 0 80 DROP TABLE IF EXISTS test; CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760; SYSTEM STOP MERGES test; @@ -32,7 +32,7 @@ SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, re 4 SELECT count() FROM system.filesystem_cache; 4 -SELECT substring(local_path, 58, 10) as file, substring(cache_path, 53, 20) as cache, size +SELECT extract(local_path, '.*/([\w.]+)') as file, extract(cache_path, '.*/([\w.]+)') as cache, size FROM ( SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path @@ -42,9 +42,9 @@ INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path ORDER BY file, cache, size; -data.bin 114 +data.bin 0 114 data.bin 0 746 -data.mrk3 80 +data.mrk3 0 80 data.mrk3 0_persistent 80 DROP TABLE IF EXISTS test2; CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760; @@ -57,7 +57,7 @@ SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, re 4 SELECT count() FROM system.filesystem_cache; 4 -SELECT substring(local_path, 58, 10) as file, substring(cache_path, 53, 20) as cache, size +SELECT extract(local_path, '.*/([\w.]+)') as file, extract(cache_path, '.*/([\w.]+)') as cache, size FROM ( SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path @@ -67,8 +67,8 @@ INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path ORDER BY file, cache, size; -data.bin 114 -data.mrk3 80 +data.bin 0 114 +data.mrk3 0 80 data.mrk3 0_persistent 80 data.mrk3 0_persistent 520 DROP TABLE test; diff --git a/tests/queries/0_stateless/02382_filesystem_cache_persistent_files.sql b/tests/queries/0_stateless/02382_filesystem_cache_persistent_files.sql index 470006054e5..d7171de48ad 100644 --- a/tests/queries/0_stateless/02382_filesystem_cache_persistent_files.sql +++ 
b/tests/queries/0_stateless/02382_filesystem_cache_persistent_files.sql @@ -14,7 +14,7 @@ INSERT INTO nopers SELECT number, toString(number) FROM numbers(10); SELECT * FROM nopers FORMAT Null; SELECT sum(size) FROM system.filesystem_cache; -SELECT substring(local_path, 58, 10) as file, substring(cache_path, 53, 20) as cache, size +SELECT extract(local_path, '.*/([\w.]+)') as file, extract(cache_path, '.*/([\w.]+)') as cache, size FROM ( SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path @@ -37,7 +37,7 @@ SELECT sum(size) FROM system.filesystem_cache; SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; SELECT count() FROM system.filesystem_cache; -SELECT substring(local_path, 58, 10) as file, substring(cache_path, 53, 20) as cache, size +SELECT extract(local_path, '.*/([\w.]+)') as file, extract(cache_path, '.*/([\w.]+)') as cache, size FROM ( SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path @@ -60,7 +60,7 @@ SELECT sum(size) FROM system.filesystem_cache; SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; SELECT count() FROM system.filesystem_cache; -SELECT substring(local_path, 58, 10) as file, substring(cache_path, 53, 20) as cache, size +SELECT extract(local_path, '.*/([\w.]+)') as file, extract(cache_path, '.*/([\w.]+)') as cache, size FROM ( SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path diff --git a/tests/queries/0_stateless/02387_parse_date_as_datetime.reference b/tests/queries/0_stateless/02387_parse_date_as_datetime.reference new file mode 100644 index 00000000000..226d8c26438 --- /dev/null +++ b/tests/queries/0_stateless/02387_parse_date_as_datetime.reference @@ -0,0 +1,4 @@ +123 2022-05-03 00:00:00 +456 2022-05-03 01:02:03 +123 2022-05-03 00:00:00.000 +456 2022-05-03 01:02:03.000 diff --git a/tests/queries/0_stateless/02387_parse_date_as_datetime.sql b/tests/queries/0_stateless/02387_parse_date_as_datetime.sql new file mode 100644 index 00000000000..9727f677be2 --- /dev/null +++ b/tests/queries/0_stateless/02387_parse_date_as_datetime.sql @@ -0,0 +1,13 @@ +CREATE TEMPORARY TABLE test (`i` Int64, `d` DateTime); +INSERT INTO test FORMAT JSONEachRow {"i": 123, "d": "2022-05-03"}; +INSERT INTO test FORMAT JSONEachRow {"i": 456, "d": "2022-05-03 01:02:03"}; +SELECT * FROM test ORDER BY i; + +DROP TABLE test; + +CREATE TEMPORARY TABLE test (`i` Int64, `d` DateTime64); +INSERT INTO test FORMAT JSONEachRow {"i": 123, "d": "2022-05-03"}; +INSERT INTO test FORMAT JSONEachRow {"i": 456, "d": "2022-05-03 01:02:03"}; +SELECT * FROM test ORDER BY i; + +DROP TABLE test; diff --git a/tests/queries/0_stateless/02388_conversion_from_string_with_datetime64_to_date_and_date32.reference b/tests/queries/0_stateless/02388_conversion_from_string_with_datetime64_to_date_and_date32.reference new file mode 100644 index 00000000000..78cdbaf0202 --- /dev/null +++ b/tests/queries/0_stateless/02388_conversion_from_string_with_datetime64_to_date_and_date32.reference @@ -0,0 +1,12 @@ +2022-08-22 +2022-08-22 +2022-08-22 +2022-08-22 +2022-08-22 +2022-08-22 +2022-08-22 +2022-08-22 +2022-08-22 +2022-08-22 +2022-08-22 +2022-08-22 diff --git 
a/tests/queries/0_stateless/02388_conversion_from_string_with_datetime64_to_date_and_date32.sql b/tests/queries/0_stateless/02388_conversion_from_string_with_datetime64_to_date_and_date32.sql new file mode 100644 index 00000000000..b1f905993b4 --- /dev/null +++ b/tests/queries/0_stateless/02388_conversion_from_string_with_datetime64_to_date_and_date32.sql @@ -0,0 +1,33 @@ +SELECT toDate('2022-08-22 01:02:03'); +SELECT toDate32('2022-08-22 01:02:03'); + +SELECT toDate('2022-08-22 01:02:03.1'); +SELECT toDate32('2022-08-22 01:02:03.1'); + +SELECT toDate('2022-08-22 01:02:03.123456'); +SELECT toDate32('2022-08-22 01:02:03.123456'); + +SELECT toDate('2022-08-22T01:02:03'); +SELECT toDate32('2022-08-22T01:02:03'); + +SELECT toDate('2022-08-22T01:02:03.1'); +SELECT toDate32('2022-08-22T01:02:03.1'); + +SELECT toDate('2022-08-22T01:02:03.123456'); +SELECT toDate32('2022-08-22T01:02:03.123456'); + + +SELECT toDate('2022-08-22+01:02:03'); -- { serverError 6 } +SELECT toDate32('2022-08-22+01:02:03'); -- { serverError 6 } + +SELECT toDate('2022-08-22 01:02:0'); -- { serverError 6 } +SELECT toDate32('2022-08-22 01:02:0'); -- { serverError 6 } + +SELECT toDate('2022-08-22 01:02:03.'); -- { serverError 6 } +SELECT toDate32('2022-08-22 01:02:03.'); -- { serverError 6 } + +SELECT toDate('2022-08-22 01:02:03.111a'); -- { serverError 6 } +SELECT toDate32('2022-08-22 01:02:03.2b'); -- { serverError 6 } + +SELECT toDate('2022-08-22 01:02:03.a'); -- { serverError 6 } +SELECT toDate32('2022-08-22 01:02:03.b'); -- { serverError 6 } diff --git a/tests/queries/0_stateless/02389_dashboard.reference b/tests/queries/0_stateless/02389_dashboard.reference new file mode 100644 index 00000000000..bcde69ce24a --- /dev/null +++ b/tests/queries/0_stateless/02389_dashboard.reference @@ -0,0 +1,2 @@ +🌚 +leeoniya diff --git a/tests/queries/0_stateless/02389_dashboard.sh b/tests/queries/0_stateless/02389_dashboard.sh new file mode 100755 index 00000000000..9250663e3e8 --- /dev/null +++ b/tests/queries/0_stateless/02389_dashboard.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/dashboard" | grep -oF '🌚' +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/js/uplot.js" | grep -oF 'leeoniya' diff --git a/tests/integration/test_nlp/__init__.py b/tests/queries/0_stateless/02391_recursive_buffer.reference similarity index 100% rename from tests/integration/test_nlp/__init__.py rename to tests/queries/0_stateless/02391_recursive_buffer.reference diff --git a/tests/queries/0_stateless/02391_recursive_buffer.sql b/tests/queries/0_stateless/02391_recursive_buffer.sql new file mode 100644 index 00000000000..c0954ed834b --- /dev/null +++ b/tests/queries/0_stateless/02391_recursive_buffer.sql @@ -0,0 +1,18 @@ +-- Tags: no-parallel +-- because of system.tables poisoning + +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32) Engine = Buffer(currentDatabase(), test, 16, 10, 100, 10000, 1000000, 10000000, 100000000); +SELECT * FROM test; -- { serverError 269 } +SELECT * FROM system.tables WHERE table = 'test' AND database = currentDatabase() FORMAT Null; -- { serverError 269 } +DROP TABLE test; + +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; +CREATE TABLE test1 (key UInt32) Engine = Buffer(currentDatabase(), test2, 16, 10, 100, 10000, 1000000, 10000000, 100000000); +CREATE TABLE test2 (key UInt32) Engine = Buffer(currentDatabase(), test1, 16, 10, 100, 10000, 1000000, 10000000, 100000000); +SELECT * FROM test1; -- { serverError 306 } +SELECT * FROM test2; -- { serverError 306 } +SELECT * FROM system.tables WHERE table IN ('test1', 'test2') AND database = currentDatabase(); -- { serverError 306 } +DROP TABLE test1; +DROP TABLE test2; diff --git a/tests/integration/test_no_local_metadata_node/__init__.py b/tests/queries/0_stateless/02392_every_setting_must_have_documentation.reference similarity index 100% rename from tests/integration/test_no_local_metadata_node/__init__.py rename to tests/queries/0_stateless/02392_every_setting_must_have_documentation.reference diff --git a/tests/queries/0_stateless/02392_every_setting_must_have_documentation.sql b/tests/queries/0_stateless/02392_every_setting_must_have_documentation.sql new file mode 100644 index 00000000000..87c6ccaa089 --- /dev/null +++ b/tests/queries/0_stateless/02392_every_setting_must_have_documentation.sql @@ -0,0 +1 @@ +SELECT name FROM system.settings WHERE length(description) < 10; diff --git a/tests/queries/0_stateless/02393_every_metric_must_have_documentation.reference b/tests/queries/0_stateless/02393_every_metric_must_have_documentation.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02393_every_metric_must_have_documentation.sql b/tests/queries/0_stateless/02393_every_metric_must_have_documentation.sql new file mode 100644 index 00000000000..cf98b6b2cb2 --- /dev/null +++ b/tests/queries/0_stateless/02393_every_metric_must_have_documentation.sql @@ -0,0 +1,2 @@ +SET system_events_show_zero_values = true; +SELECT metric FROM system.metrics WHERE length(description) < 10; diff --git a/tests/queries/0_stateless/02394_every_profile_event_must_have_documentation.reference b/tests/queries/0_stateless/02394_every_profile_event_must_have_documentation.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02394_every_profile_event_must_have_documentation.sql 
b/tests/queries/0_stateless/02394_every_profile_event_must_have_documentation.sql new file mode 100644 index 00000000000..1dad6c432ec --- /dev/null +++ b/tests/queries/0_stateless/02394_every_profile_event_must_have_documentation.sql @@ -0,0 +1,2 @@ +SET system_events_show_zero_values = true; +SELECT event FROM system.events WHERE length(description) < 10; diff --git a/tests/queries/0_stateless/02395_every_merge_tree_setting_must_have_documentation.reference b/tests/queries/0_stateless/02395_every_merge_tree_setting_must_have_documentation.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02395_every_merge_tree_setting_must_have_documentation.sql b/tests/queries/0_stateless/02395_every_merge_tree_setting_must_have_documentation.sql new file mode 100644 index 00000000000..6a7d4ad5177 --- /dev/null +++ b/tests/queries/0_stateless/02395_every_merge_tree_setting_must_have_documentation.sql @@ -0,0 +1 @@ +SELECT name FROM system.merge_tree_settings WHERE length(description) < 10; diff --git a/tests/queries/0_stateless/02399_merge_tree_mutate_in_partition.reference b/tests/queries/0_stateless/02399_merge_tree_mutate_in_partition.reference new file mode 100644 index 00000000000..ac06d80f896 --- /dev/null +++ b/tests/queries/0_stateless/02399_merge_tree_mutate_in_partition.reference @@ -0,0 +1,9 @@ +1 1 1_1_1_0 +1 2 1_3_3_0 +2 1 2_2_2_0 +2 2 2_4_4_0 +mutation_5.txt UPDATE n = n + (n NOT IN (default.m)) IN PARTITION ID \'1\' WHERE 1 [] 0 1 +1 2 +1 3 +2 1 +2 2 diff --git a/tests/queries/0_stateless/02399_merge_tree_mutate_in_partition.sql b/tests/queries/0_stateless/02399_merge_tree_mutate_in_partition.sql new file mode 100644 index 00000000000..c56acf9c334 --- /dev/null +++ b/tests/queries/0_stateless/02399_merge_tree_mutate_in_partition.sql @@ -0,0 +1,18 @@ + +drop table if exists mt; +drop table if exists m; + +create table mt (p int, n int) engine=MergeTree order by tuple() partition by p; +create table m (n int) engine=Memory; +insert into mt values (1, 1), (2, 1); +insert into mt values (1, 2), (2, 2); +select *, _part from mt order by _part; + +alter table mt update n = n + (n not in m) in partition id '1' where 1 settings mutations_sync=1; +drop table m; +optimize table mt final; + +select mutation_id, command, parts_to_do_names, parts_to_do, is_done from system.mutations where database=currentDatabase(); +select * from mt order by p, n; + +drop table mt; diff --git a/tests/queries/0_stateless/02402_external_disk_mertrics.reference b/tests/queries/0_stateless/02402_external_disk_mertrics.reference new file mode 100644 index 00000000000..e8183f05f5d --- /dev/null +++ b/tests/queries/0_stateless/02402_external_disk_mertrics.reference @@ -0,0 +1,3 @@ +1 +1 +1 diff --git a/tests/queries/0_stateless/02402_external_disk_mertrics.sql b/tests/queries/0_stateless/02402_external_disk_mertrics.sql new file mode 100644 index 00000000000..b675c05f45c --- /dev/null +++ b/tests/queries/0_stateless/02402_external_disk_mertrics.sql @@ -0,0 +1,78 @@ +-- Tags: no-parallel, no-fasttest, long, no-random-settings + +SET max_bytes_before_external_sort = 33554432; +set max_block_size = 1048576; + +SELECT number FROM (SELECT number FROM numbers(2097152)) ORDER BY number * 1234567890123456789 LIMIT 2097142, 10 +SETTINGS log_comment='02402_external_disk_mertrics/sort' +FORMAT Null; + +SET max_bytes_before_external_group_by = '100M'; +SET max_memory_usage = '410M'; +SET group_by_two_level_threshold = '100K'; +SET group_by_two_level_threshold_bytes = '50M'; + +SELECT 
sum(k), sum(c) FROM (SELECT number AS k, count() AS c FROM (SELECT * FROM system.numbers LIMIT 2097152) GROUP BY k) +SETTINGS log_comment='02402_external_disk_mertrics/aggregation' +FORMAT Null; + +SET join_algorithm = 'partial_merge'; +SET default_max_bytes_in_join = 0; +SET max_bytes_in_join = 10000000; + +SELECT number * 200000 as n, j * 2097152 FROM numbers(5) nums +ANY LEFT JOIN ( SELECT number * 2 AS n, number AS j FROM numbers(1000000) ) js2 +USING n +ORDER BY n +SETTINGS log_comment='02402_external_disk_mertrics/join' +FORMAT Null; + +SYSTEM FLUSH LOGS; + +SELECT + any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND + any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalSortWritePart']) >= 1 AND + any(ProfileEvents['ExternalSortMerge']) >= 1 AND + any(ProfileEvents['ExternalSortCompressedBytes']) >= 100000 AND + any(ProfileEvents['ExternalSortUncompressedBytes']) >= 100000 AND + count() == 1 + FROM system.query_log WHERE current_database = currentDatabase() + AND log_comment = '02402_external_disk_mertrics/sort' + AND query ILIKE 'SELECT%2097152%' AND type = 'QueryFinish'; + +SELECT + any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND + any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalAggregationWritePart']) >= 1 AND + any(ProfileEvents['ExternalAggregationMerge']) >= 1 AND + any(ProfileEvents['ExternalAggregationCompressedBytes']) >= 100000 AND + any(ProfileEvents['ExternalAggregationUncompressedBytes']) >= 100000 AND + count() == 1 + FROM system.query_log WHERE current_database = currentDatabase() + AND log_comment = '02402_external_disk_mertrics/aggregation' + AND query ILIKE 'SELECT%2097152%' AND type = 'QueryFinish'; + +SELECT + any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND + any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalJoinWritePart']) >= 1 AND + any(ProfileEvents['ExternalJoinMerge']) >= 0 AND + any(ProfileEvents['ExternalJoinCompressedBytes']) >= 100000 AND + any(ProfileEvents['ExternalJoinUncompressedBytes']) >= 100000 AND + count() == 1 + FROM system.query_log WHERE current_database = currentDatabase() + AND log_comment = '02402_external_disk_mertrics/join' + AND query ILIKE 'SELECT%2097152%' AND type = 'QueryFinish'; + +-- Do not check values because they may not be recorded; just check that they exist +SELECT + CurrentMetric_TemporaryFilesForAggregation, + CurrentMetric_TemporaryFilesForJoin, + CurrentMetric_TemporaryFilesForSort +FROM system.metric_log +ORDER BY event_time DESC LIMIT 5 +FORMAT Null; diff --git a/tests/queries/0_stateless/02402_merge_engine_with_view.reference b/tests/queries/0_stateless/02402_merge_engine_with_view.reference new file mode 100644 index 00000000000..4a869286f82 --- /dev/null +++ b/tests/queries/0_stateless/02402_merge_engine_with_view.reference @@ -0,0 +1,6 @@ +2 +2 +3 +3 +4 +4 diff --git a/tests/queries/0_stateless/02402_merge_engine_with_view.sql b/tests/queries/0_stateless/02402_merge_engine_with_view.sql new file mode 100644 index 00000000000..613f76e24c9 --- /dev/null +++ b/tests/queries/0_stateless/02402_merge_engine_with_view.sql @@ -0,0 +1,10 @@ + +CREATE TABLE m0 (id UInt64) ENGINE=MergeTree ORDER BY id SETTINGS 
index_granularity = 1; +INSERT INTO m0 SELECT number FROM numbers(10); +CREATE TABLE m1 (id UInt64, s String) ENGINE=MergeTree ORDER BY id SETTINGS index_granularity = 1; +INSERT INTO m1 SELECT number, 'boo' FROM numbers(10); +CREATE VIEW m1v AS SELECT id FROM m1; + +CREATE TABLE m2 (id UInt64) ENGINE=Merge(currentDatabase(),'m0|m1v'); + +SELECT * FROM m2 WHERE id > 1 AND id < 5 ORDER BY id SETTINGS force_primary_key=1, max_bytes_to_read=64; \ No newline at end of file diff --git a/tests/queries/0_stateless/02403_big_http_chunk_size.python b/tests/queries/0_stateless/02403_big_http_chunk_size.python new file mode 100644 index 00000000000..72aa4da0bac --- /dev/null +++ b/tests/queries/0_stateless/02403_big_http_chunk_size.python @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +from socket import socket, AF_INET, SOCK_STREAM +import os + + +EXCEPTION_CODE_HEADER = "X-ClickHouse-Exception-Code" +TRANSFER_ENCODING_HEADER = "Transfer-Encoding" + + +def main(): + host = os.environ['CLICKHOUSE_HOST'] + port = int(os.environ['CLICKHOUSE_PORT_HTTP']) + + sock = socket(AF_INET, SOCK_STREAM) + sock.connect((host, port)) + sock.settimeout(5) + s = "POST /play HTTP/1.1\r\n" + s += "Host: %s\r\n" % host + s += "Content-type: multipart/form-data\r\n" + s += "Transfer-encoding: chunked\r\n" + s += "\r\n" + s += "ffffffffffffffff" + s += "\r\n" + s += "X" * 100000 + sock.sendall(s.encode()) + data = sock.recv(10000).decode() + sock.close() + + lines = data.splitlines() + + print(lines.pop(0)) + + headers = {} + for x in lines: + x = x.strip() + if not x: + continue + tokens = x.split(":", 1) + if len(tokens) < 2: + continue + key, val = tokens + headers[key.strip()] = val.strip() + + print("encoding type", headers[TRANSFER_ENCODING_HEADER]) + print("error code", headers[EXCEPTION_CODE_HEADER]) + + +if __name__ == "__main__": + main() + diff --git a/tests/queries/0_stateless/02403_big_http_chunk_size.reference b/tests/queries/0_stateless/02403_big_http_chunk_size.reference new file mode 100644 index 00000000000..d7970bd2eb1 --- /dev/null +++ b/tests/queries/0_stateless/02403_big_http_chunk_size.reference @@ -0,0 +1,3 @@ +HTTP/1.1 200 OK +encoding type chunked +error code 1000 diff --git a/tests/queries/0_stateless/02403_big_http_chunk_size.sh b/tests/queries/0_stateless/02403_big_http_chunk_size.sh new file mode 100755 index 00000000000..4434861ea46 --- /dev/null +++ b/tests/queries/0_stateless/02403_big_http_chunk_size.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +# We should have correct env vars from shell_config.sh to run this test +python3 "$CURDIR"/02403_big_http_chunk_size.python diff --git a/tests/queries/0_stateless/02403_date_time_narrowing.reference b/tests/queries/0_stateless/02403_date_time_narrowing.reference new file mode 100644 index 00000000000..7d6e91c61b8 --- /dev/null +++ b/tests/queries/0_stateless/02403_date_time_narrowing.reference @@ -0,0 +1,20 @@ +1970-01-01 2149-06-06 1970-01-01 2149-06-06 1900-01-01 1970-01-02 1970-01-01 00:00:00 2106-02-07 06:28:15 +1970-01-01 2149-06-06 +1970-01-01 2149-06-06 +1970-01-01 00:00:00 2106-02-07 06:28:15 +1970-01-01 00:00:00 2106-02-07 06:28:15 +2106-02-07 06:28:15 +toStartOfDay +2106-02-07 00:00:00 1970-01-01 00:00:00 2106-02-07 00:00:00 1970-01-01 00:00:00 2106-02-07 00:00:00 +toStartOfWeek +1970-01-01 1970-01-01 1970-01-01 1970-01-01 1970-01-01 2149-06-01 1970-01-01 2149-06-02 +toMonday +1970-01-01 1970-01-01 2149-06-02 1970-01-01 2149-06-02 +toStartOfMonth +1970-01-01 2149-06-01 1970-01-01 2149-06-01 +toLastDayOfMonth +2149-05-31 1970-01-01 2149-05-31 1970-01-01 2149-05-31 +toStartOfQuarter +1970-01-01 2149-04-01 1970-01-01 2149-04-01 +toStartOfYear +1970-01-01 2149-01-01 1970-01-01 2149-01-01 diff --git a/tests/queries/0_stateless/02403_date_time_narrowing.sql b/tests/queries/0_stateless/02403_date_time_narrowing.sql new file mode 100644 index 00000000000..07cbba6f31c --- /dev/null +++ b/tests/queries/0_stateless/02403_date_time_narrowing.sql @@ -0,0 +1,74 @@ +-- check conversion of numbers to date/time -- +SELECT toDate(toInt32(toDate32('1930-01-01', 'UTC')), 'UTC'), + toDate(toInt32(toDate32('2151-01-01', 'UTC')), 'UTC'), + toDate(toInt64(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC')), 'UTC'), + toDate(toInt64(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC')), 'UTC'), + toDate32(toInt32(toDate32('1900-01-01', 'UTC')) - 1, 'UTC'), + toDate32(toInt32(toDate32('2299-12-31', 'UTC')) + 1, 'UTC'), + toDateTime(toInt64(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC')), 'UTC'), + toDateTime(toInt64(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC')), 'UTC'); + +-- check conversion of extended range type to normal range type -- +SELECT toDate(toDate32('1930-01-01', 'UTC'), 'UTC'), + toDate(toDate32('2151-01-01', 'UTC'), 'UTC'); + +SELECT toDate(toDateTime64('1930-01-01 12:12:12.12', 3, 'UTC'), 'UTC'), + toDate(toDateTime64('2151-01-01 12:12:12.12', 3, 'UTC'), 'UTC'); + +SELECT toDateTime(toDateTime64('1930-01-01 12:12:12.12', 3, 'UTC'), 'UTC'), + toDateTime(toDateTime64('2151-01-01 12:12:12.12', 3, 'UTC'), 'UTC'); + +SELECT toDateTime(toDate32('1930-01-01', 'UTC'), 'UTC'), + toDateTime(toDate32('2151-01-01', 'UTC'), 'UTC'); + +SELECT toDateTime(toDate('2141-01-01', 'UTC'), 'UTC'); + +-- test DateTimeTransforms -- +SELECT 'toStartOfDay'; +SELECT toStartOfDay(toDate('2141-01-01', 'UTC'), 'UTC'), + toStartOfDay(toDate32('1930-01-01', 'UTC'), 'UTC'), + toStartOfDay(toDate32('2141-01-01', 'UTC'), 'UTC'), + toStartOfDay(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), + toStartOfDay(toDateTime64('2141-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); + +SELECT 'toStartOfWeek'; +SELECT toStartOfWeek(toDate('1970-01-01', 'UTC')), + toStartOfWeek(toDate32('1970-01-01', 'UTC')), + toStartOfWeek(toDateTime('1970-01-01 10:10:10', 'UTC'), 0, 'UTC'), + toStartOfWeek(toDateTime64('1970-01-01 10:10:10.123', 3, 'UTC'), 1, 'UTC'), + toStartOfWeek(toDate32('1930-01-01', 'UTC')), + toStartOfWeek(toDate32('2151-01-01', 'UTC')), + toStartOfWeek(toDateTime64('1930-01-01 
12:12:12.123', 3, 'UTC'), 2, 'UTC'), + toStartOfWeek(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 3, 'UTC'); + +SELECT 'toMonday'; +SELECT toMonday(toDate('1970-01-02', 'UTC')), + toMonday(toDate32('1930-01-01', 'UTC')), + toMonday(toDate32('2151-01-01', 'UTC')), + toMonday(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), + toMonday(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); + +SELECT 'toStartOfMonth'; +SELECT toStartOfMonth(toDate32('1930-01-01', 'UTC')), + toStartOfMonth(toDate32('2151-01-01', 'UTC')), + toStartOfMonth(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), + toStartOfMonth(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); + +SELECT 'toLastDayOfMonth'; +SELECT toLastDayOfMonth(toDate('2149-06-03', 'UTC')), + toLastDayOfMonth(toDate32('1930-01-01', 'UTC')), + toLastDayOfMonth(toDate32('2151-01-01', 'UTC')), + toLastDayOfMonth(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), + toLastDayOfMonth(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); + +SELECT 'toStartOfQuarter'; +SELECT toStartOfQuarter(toDate32('1930-01-01', 'UTC')), + toStartOfQuarter(toDate32('2151-01-01', 'UTC')), + toStartOfQuarter(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), + toStartOfQuarter(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); + +SELECT 'toStartOfYear'; +SELECT toStartOfYear(toDate32('1930-01-01', 'UTC')), + toStartOfYear(toDate32('2151-01-01', 'UTC')), + toStartOfYear(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), + toStartOfYear(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); diff --git a/tests/queries/0_stateless/02403_ttl_column_multiple_times.reference b/tests/queries/0_stateless/02403_ttl_column_multiple_times.reference new file mode 100644 index 00000000000..5695a080619 --- /dev/null +++ b/tests/queries/0_stateless/02403_ttl_column_multiple_times.reference @@ -0,0 +1,3 @@ +2020-10-01 144 +2020-10-01 0 +2020-10-01 0 diff --git a/tests/queries/0_stateless/02403_ttl_column_multiple_times.sql b/tests/queries/0_stateless/02403_ttl_column_multiple_times.sql new file mode 100644 index 00000000000..a1114eb15b1 --- /dev/null +++ b/tests/queries/0_stateless/02403_ttl_column_multiple_times.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS ttl_table; + +CREATE TABLE ttl_table +( + EventDate Date, + Longitude Float64 TTL EventDate + toIntervalWeek(2) +) +ENGINE = MergeTree() +ORDER BY EventDate +SETTINGS vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; + +SYSTEM STOP MERGES ttl_table; + +INSERT INTO ttl_table VALUES(toDate('2020-10-01'), 144); + +SELECT * FROM ttl_table; + +SYSTEM START MERGES ttl_table; + +OPTIMIZE TABLE ttl_table FINAL; + +SELECT * FROM ttl_table; + +OPTIMIZE TABLE ttl_table FINAL; + +SELECT * FROM ttl_table; + +DROP TABLE IF EXISTS ttl_table; diff --git a/tests/queries/0_stateless/02404_lightweight_delete_vertical_merge.reference b/tests/queries/0_stateless/02404_lightweight_delete_vertical_merge.reference new file mode 100644 index 00000000000..d57e99412b5 --- /dev/null +++ b/tests/queries/0_stateless/02404_lightweight_delete_vertical_merge.reference @@ -0,0 +1,147 @@ +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +all_1_1_0 Wide +all_1_1_0 id UInt64 10 +all_1_1_0 value String 10 +1 1 +2 2 +4 4 +5 5 +7 7 +8 8 +all_1_1_0_2 Wide +all_1_1_0_2 _row_exists UInt8 10 +all_1_1_0_2 id UInt64 10 +all_1_1_0_2 value String 10 +1 1 +2 2 +4 4 +5 5 +7 7 +8 8 +all_1_1_1_2 Wide +all_1_1_1_2 id UInt64 6 +all_1_1_1_2 value String 6 +1 1 +5 5 
+7 7 +all_1_1_1_3 Wide +all_1_1_1_3 _row_exists UInt8 6 +all_1_1_1_3 id UInt64 6 +all_1_1_1_3 value String 6 +0 100 +1 1 +1 101 +2 102 +3 103 +4 104 +5 105 +5 5 +6 106 +7 107 +7 7 +8 108 +9 109 +all_1_1_1_3 Wide +all_4_4_0 Wide +all_1_1_1_3 _row_exists UInt8 6 +all_1_1_1_3 id UInt64 6 +all_1_1_1_3 value String 6 +all_4_4_0 id UInt64 10 +all_4_4_0 value String 10 +0 100 +1 1 +1 101 +2 102 +3 103 +4 104 +5 105 +5 5 +6 106 +7 107 +7 7 +8 108 +9 109 +all_1_4_2_3 Wide +all_1_4_2_3 id UInt64 13 +all_1_4_2_3 value String 13 +0 100 +0 200 +1 1 +1 101 +1 201 +2 102 +2 202 +3 103 +3 203 +4 104 +4 204 +5 105 +5 205 +5 5 +6 106 +6 206 +7 107 +7 207 +7 7 +8 108 +8 208 +9 109 +9 209 +all_1_4_2_3 Wide +all_5_5_0 Wide +all_1_4_2_3 id UInt64 13 +all_1_4_2_3 value String 13 +all_5_5_0 id UInt64 10 +all_5_5_0 value String 10 +0 100 +0 200 +1 1 +1 101 +1 201 +3 103 +3 203 +4 104 +4 204 +6 106 +6 206 +7 107 +7 207 +7 7 +9 109 +9 209 +all_1_4_2_6 Wide +all_5_5_0_6 Wide +all_1_4_2_6 _row_exists UInt8 13 +all_1_4_2_6 id UInt64 13 +all_1_4_2_6 value String 13 +all_5_5_0_6 _row_exists UInt8 10 +all_5_5_0_6 id UInt64 10 +all_5_5_0_6 value String 10 +0 100 +0 200 +1 1 +1 101 +1 201 +3 103 +3 203 +4 104 +4 204 +6 106 +6 206 +7 107 +7 207 +7 7 +9 109 +9 209 +all_1_5_3_6 Wide +all_1_5_3_6 id UInt64 16 +all_1_5_3_6 value String 16 diff --git a/tests/queries/0_stateless/02404_lightweight_delete_vertical_merge.sql b/tests/queries/0_stateless/02404_lightweight_delete_vertical_merge.sql new file mode 100644 index 00000000000..970db50282f --- /dev/null +++ b/tests/queries/0_stateless/02404_lightweight_delete_vertical_merge.sql @@ -0,0 +1,184 @@ +DROP TABLE IF EXISTS lwd_test; + +CREATE TABLE lwd_test +( + `id` UInt64, + `value` String +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + min_rows_for_wide_part = 1, + min_bytes_for_wide_part = 1; + + +INSERT INTO lwd_test SELECT number AS id, toString(number) AS value FROM numbers(10); + +SELECT * FROM lwd_test ORDER BY id, value; + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + + +SET mutations_sync = 2; +SET allow_experimental_lightweight_delete = 1; + +-- delete some rows using LWD +DELETE FROM lwd_test WHERE (id % 3) = 0; + + +SELECT * FROM lwd_test ORDER BY id, value; + + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + +-- optimize table to physically delete the rows +OPTIMIZE TABLE lwd_test FINAL; + +SELECT * FROM lwd_test ORDER BY id, value; + + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + +-- delete more rows +DELETE FROM lwd_test WHERE (id % 2) = 0; + + +SELECT * FROM lwd_test ORDER BY id, value; + + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY 
name;
+
+
+SELECT name, column, type, rows
+FROM system.parts_columns
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name, column;
+
+
+-- add another part that doesn't have deleted rows
+INSERT INTO lwd_test SELECT number AS id, toString(number+100) AS value FROM numbers(10);
+
+SELECT * FROM lwd_test ORDER BY id, value;
+
+SELECT name, part_type
+FROM system.parts
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name;
+
+
+SELECT name, column, type, rows
+FROM system.parts_columns
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name, column;
+
+
+-- optimize table to merge 2 parts together: the 1st has LWD rows and the 2nd doesn't have LWD rows
+-- physically delete the rows
+OPTIMIZE TABLE lwd_test FINAL;
+
+SELECT * FROM lwd_test ORDER BY id, value;
+
+
+SELECT name, part_type
+FROM system.parts
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name;
+
+
+SELECT name, column, type, rows
+FROM system.parts_columns
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name, column;
+
+
+-- add another part that doesn't have deleted rows
+INSERT INTO lwd_test SELECT number AS id, toString(number+200) AS value FROM numbers(10);
+
+SELECT * FROM lwd_test ORDER BY id, value;
+
+SELECT name, part_type
+FROM system.parts
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name;
+
+
+SELECT name, column, type, rows
+FROM system.parts_columns
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name, column;
+
+
+-- delete more rows
+DELETE FROM lwd_test WHERE (id % 3) = 2;
+
+
+SELECT * FROM lwd_test ORDER BY id, value;
+
+
+SELECT name, part_type
+FROM system.parts
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name;
+
+
+SELECT name, column, type, rows
+FROM system.parts_columns
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name, column;
+
+
+-- optimize table to merge 2 parts together, both of them have LWD rows
+-- physically delete the rows
+OPTIMIZE TABLE lwd_test FINAL;
+
+SELECT * FROM lwd_test ORDER BY id, value;
+
+
+SELECT name, part_type
+FROM system.parts
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name;
+
+
+SELECT name, column, type, rows
+FROM system.parts_columns
+WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active
+ORDER BY name, column;
+
+
+DROP TABLE lwd_test;
diff --git a/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference b/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference
new file mode 100644
index 00000000000..049603328d9
--- /dev/null
+++ b/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference
@@ -0,0 +1,90 @@
+TSV
+c1 Nullable(Int64)
+c2 Nullable(Date)
+c1 Nullable(Float64)
+c2 Nullable(Date)
+c1 Nullable(Int64)
+c2 Nullable(DateTime64(9))
+c1 UInt8
+c2 Nullable(Date)
+4
+TSVWithNames
+number Nullable(Int64)
+toDate(number) Nullable(Date)
+number Nullable(Float64)
+toDate(number) Nullable(Date)
+number Nullable(Int64)
+toDate(number) Nullable(DateTime64(9))
+number Nullable(Int64)
+toDate(number) Nullable(Date)
+4
+CSV
+c1 Nullable(Int64)
+c2 Nullable(Date)
+c1 Nullable(Float64)
+c2 Nullable(Date)
+c1 Nullable(Int64)
+c2 Nullable(DateTime64(9))
+c1 UInt8
+c2 Nullable(Date)
+4
+CSVWithNames
+number Nullable(Int64) +toDate(number) Nullable(Date) +number Nullable(Float64) +toDate(number) Nullable(Date) +number Nullable(Int64) +toDate(number) Nullable(DateTime64(9)) +number Nullable(Int64) +toDate(number) Nullable(Date) +4 +TSKV +number Nullable(Int64) +toDate(number) Nullable(Date) +number Nullable(Float64) +toDate(number) Nullable(Date) +number Nullable(Int64) +toDate(number) Nullable(DateTime64(9)) +number Nullable(Int64) +toDate(number) Nullable(Date) +4 +CustomSeparated +c1 Nullable(Int64) +c2 Nullable(Date) +c1 Nullable(Float64) +c2 Nullable(Date) +c1 Nullable(Int64) +c2 Nullable(DateTime64(9)) +c1 UInt8 +c2 Nullable(Date) +4 +JSONEachRow +number Nullable(Int64) +toDate(number) Nullable(Date) +number Nullable(Float64) +toDate(number) Nullable(Date) +number Nullable(Int64) +toDate(number) Nullable(DateTime64(9)) +number Nullable(Int64) +toDate(number) Nullable(Date) +4 +JSONCompactEachRow +c1 Nullable(Int64) +c2 Nullable(Date) +c1 Nullable(Float64) +c2 Nullable(Date) +c1 Nullable(Int64) +c2 Nullable(DateTime64(9)) +c1 UInt8 +c2 Nullable(Date) +4 +Values +c1 Nullable(Int64) +c2 Nullable(Date) +c1 Nullable(Float64) +c2 Nullable(Date) +c1 Nullable(Int64) +c2 Nullable(DateTime64(9)) +c1 UInt8 +c2 Nullable(Date) +4 diff --git a/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.sql.j2 b/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.sql.j2 new file mode 100644 index 00000000000..9fff19192c0 --- /dev/null +++ b/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.sql.j2 @@ -0,0 +1,16 @@ +-- Tags: no-parallel, no-fasttest + +system drop schema cache for file; + +{% for format in ['TSV', 'TSVWithNames', 'CSV', 'CSVWithNames', 'TSKV', 'CustomSeparated', 'JSONEachRow', 'JSONCompactEachRow', 'Values'] -%} + +select '{{ format }}'; +insert into function file(02404_data.{{ format }}) select number, toDate(number) from numbers(10); +desc file(02404_data.{{ format }}); +desc file(02404_data.{{ format }}) settings input_format_try_infer_integers=0; +desc file(02404_data.{{ format }}) settings input_format_try_infer_dates=0; +desc file(02404_data.{{ format }}) settings schema_inference_hints='c1 UInt8'; +select count() from system.schema_inference_cache where countSubstrings(source, '02404_data.{{ format }}') > 0; + +{% endfor -%} + diff --git a/tests/queries/0_stateless/02405_avro_read_nested.reference b/tests/queries/0_stateless/02405_avro_read_nested.reference new file mode 100644 index 00000000000..2103067ec72 --- /dev/null +++ b/tests/queries/0_stateless/02405_avro_read_nested.reference @@ -0,0 +1,2 @@ +[1,2] ['aa','bb'] +[1,5] [(2,['aa','bb']),(6,['ee','ff'])] [[(3,'cc'),(4,'dd')],[(7,'gg'),(8,'hh')]] diff --git a/tests/queries/0_stateless/02405_avro_read_nested.sql b/tests/queries/0_stateless/02405_avro_read_nested.sql new file mode 100644 index 00000000000..512c48a2ff7 --- /dev/null +++ b/tests/queries/0_stateless/02405_avro_read_nested.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest, no-parallel + +set flatten_nested = 1; + +insert into function file(02405_data.avro) select [(1, 'aa'), (2, 'bb')]::Nested(x UInt32, y String) as nested settings engine_file_truncate_on_insert=1; +select * from file(02405_data.avro, auto, 'nested Nested(x UInt32, y String)'); + +insert into function file(02405_data.avro) select [(1, (2, ['aa', 'bb']), [(3, 'cc'), (4, 'dd')]), (5, (6, ['ee', 'ff']), [(7, 'gg'), (8, 'hh')])]::Nested(x UInt32, y Tuple(y1 UInt32, y2 Array(String)), z Nested(z1 UInt32, z2 String)) as nested 
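+-- Note: with flatten_nested = 1 (set above), a Nested column is stored as a
+-- group of parallel arrays sharing a common prefix (nested.x, nested.y, ...),
+-- and it is this flattened layout that round-trips through the Avro file; the
+-- SELECT below reads it back with the same Nested type.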
settings engine_file_truncate_on_insert=1; +select * from file(02405_data.avro, auto, 'nested Nested(x UInt32, y Tuple(y1 UInt32, y2 Array(String)), z Nested(z1 UInt32, z2 String))'); diff --git a/tests/queries/0_stateless/02405_pmj_issue_40335.reference b/tests/queries/0_stateless/02405_pmj_issue_40335.reference new file mode 100644 index 00000000000..94053253394 --- /dev/null +++ b/tests/queries/0_stateless/02405_pmj_issue_40335.reference @@ -0,0 +1,5 @@ +a +b +c +d +e diff --git a/tests/queries/0_stateless/02405_pmj_issue_40335.sql b/tests/queries/0_stateless/02405_pmj_issue_40335.sql new file mode 100644 index 00000000000..e50e27b82ab --- /dev/null +++ b/tests/queries/0_stateless/02405_pmj_issue_40335.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x UInt64) ENGINE = TinyLog; +INSERT INTO t1 VALUES (1), (2), (3); + +CREATE TABLE t2 (x UInt64, value String) ENGINE = TinyLog; +INSERT INTO t2 VALUES (1, 'a'), (2, 'b'), (2, 'c'); +INSERT INTO t2 VALUES (3, 'd'), (3, 'e'), (4, 'f'); + +SET max_block_size=3; +SET max_joined_block_size_rows = 2; +SET join_algorithm='partial_merge'; + +SELECT value FROM t1 LEFT JOIN t2 ON t1.x = t2.x ORDER BY value; diff --git a/tests/queries/0_stateless/02406_try_read_datetime64_bug.reference b/tests/queries/0_stateless/02406_try_read_datetime64_bug.reference new file mode 100644 index 00000000000..c31db065c01 --- /dev/null +++ b/tests/queries/0_stateless/02406_try_read_datetime64_bug.reference @@ -0,0 +1,2 @@ +1970-01-01 00:00:00.000000000 +c1 Nullable(String) diff --git a/tests/queries/0_stateless/02406_try_read_datetime64_bug.sql b/tests/queries/0_stateless/02406_try_read_datetime64_bug.sql new file mode 100644 index 00000000000..846e732ba8e --- /dev/null +++ b/tests/queries/0_stateless/02406_try_read_datetime64_bug.sql @@ -0,0 +1,2 @@ +select toDateTime64OrDefault('Aaaa e a.a.aaaaaaaaa', 9, 'UTC'); +desc format(CSV, '"Aaaa e a.a.aaaaaaaaa"'); diff --git a/tests/queries/0_stateless/02407_array_element_from_map_wrong_type.reference b/tests/queries/0_stateless/02407_array_element_from_map_wrong_type.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02407_array_element_from_map_wrong_type.sql b/tests/queries/0_stateless/02407_array_element_from_map_wrong_type.sql new file mode 100644 index 00000000000..0e8d7f4f7fe --- /dev/null +++ b/tests/queries/0_stateless/02407_array_element_from_map_wrong_type.sql @@ -0,0 +1 @@ +select m[0], materialize(map('key', 42)) as m; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} diff --git a/tests/queries/0_stateless/02408_to_fixed_string_short_circuit.reference b/tests/queries/0_stateless/02408_to_fixed_string_short_circuit.reference new file mode 100644 index 00000000000..96e34d5a44c --- /dev/null +++ b/tests/queries/0_stateless/02408_to_fixed_string_short_circuit.reference @@ -0,0 +1,2 @@ +\N +\N diff --git a/tests/queries/0_stateless/02408_to_fixed_string_short_circuit.sql b/tests/queries/0_stateless/02408_to_fixed_string_short_circuit.sql new file mode 100644 index 00000000000..9e817bbe5ca --- /dev/null +++ b/tests/queries/0_stateless/02408_to_fixed_string_short_circuit.sql @@ -0,0 +1 @@ +select if(number < 0, toFixedString(materialize('123'), 2), NULL) from numbers(2); diff --git a/tests/queries/0_stateless/02409_url_format_detection.reference b/tests/queries/0_stateless/02409_url_format_detection.reference new file mode 100644 index 00000000000..604b1535301 --- /dev/null +++ 
b/tests/queries/0_stateless/02409_url_format_detection.reference
@@ -0,0 +1 @@
+x UInt32
diff --git a/tests/queries/0_stateless/02409_url_format_detection.sql b/tests/queries/0_stateless/02409_url_format_detection.sql
new file mode 100644
index 00000000000..c94700cfdb5
--- /dev/null
+++ b/tests/queries/0_stateless/02409_url_format_detection.sql
@@ -0,0 +1 @@
+desc url('http://localhost:8888/test/data.tsv?get=parameterHere', auto, 'x UInt32');
diff --git a/tests/queries/0_stateless/02410_to_decimal_or_default.reference b/tests/queries/0_stateless/02410_to_decimal_or_default.reference
new file mode 100644
index 00000000000..10ad131d596
--- /dev/null
+++ b/tests/queries/0_stateless/02410_to_decimal_or_default.reference
@@ -0,0 +1,8 @@
+111 Decimal(9, 3)
+222 Decimal(18, 3)
+333 Decimal(38, 3)
+444 Decimal(76, 3)
+123.456 Decimal(9, 3)
+123.456 Decimal(18, 3)
+123.456 Decimal(38, 3)
+123.456 Decimal(76, 3)
diff --git a/tests/queries/0_stateless/02410_to_decimal_or_default.sql b/tests/queries/0_stateless/02410_to_decimal_or_default.sql
new file mode 100644
index 00000000000..8db464038b2
--- /dev/null
+++ b/tests/queries/0_stateless/02410_to_decimal_or_default.sql
@@ -0,0 +1,9 @@
+SELECT toDecimal32OrDefault(111, 3, 123.456::Decimal32(3)) AS x, toTypeName(x);
+SELECT toDecimal64OrDefault(222, 3, 123.456::Decimal64(3)) AS x, toTypeName(x);
+SELECT toDecimal128OrDefault(333, 3, 123.456::Decimal128(3)) AS x, toTypeName(x);
+SELECT toDecimal256OrDefault(444, 3, 123.456::Decimal256(3)) AS x, toTypeName(x);
+
+SELECT toDecimal32OrDefault('Hello', 3, 123.456::Decimal32(3)) AS x, toTypeName(x);
+SELECT toDecimal64OrDefault('Hello', 3, 123.456::Decimal64(3)) AS x, toTypeName(x);
+SELECT toDecimal128OrDefault('Hello', 3, 123.456::Decimal128(3)) AS x, toTypeName(x);
+SELECT toDecimal256OrDefault('Hello', 3, 123.456::Decimal256(3)) AS x, toTypeName(x);
diff --git a/tests/queries/0_stateless/02411_merge_tree_zero_max_read_buffer_size.reference b/tests/queries/0_stateless/02411_merge_tree_zero_max_read_buffer_size.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02411_merge_tree_zero_max_read_buffer_size.sql b/tests/queries/0_stateless/02411_merge_tree_zero_max_read_buffer_size.sql
new file mode 100644
index 00000000000..72b80cc70c8
--- /dev/null
+++ b/tests/queries/0_stateless/02411_merge_tree_zero_max_read_buffer_size.sql
@@ -0,0 +1,17 @@
+-- Tags: no-parallel
+-- Tag no-parallel: due to SYSTEM DROP MARK CACHE
+
+--- Regression test for possible LOGICAL_ERROR with max_read_buffer_size=0
+--- (when marks were read with max_read_buffer_size=0, hence DROP MARK CACHE is required)
+
+DROP TABLE IF EXISTS data_02411;
+CREATE TABLE data_02411
+(
+    key Int32
+)
+ENGINE = MergeTree
+ORDER BY key
+SETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192;
+INSERT INTO data_02411 SELECT * FROM numbers(100);
+SYSTEM DROP MARK CACHE;
+SELECT * FROM data_02411 FORMAT Null SETTINGS max_read_buffer_size=0; -- { serverError INVALID_SETTING_VALUE }
diff --git a/tests/queries/0_stateless/02412_nlp.reference b/tests/queries/0_stateless/02412_nlp.reference
new file mode 100644
index 00000000000..337022b9fb2
--- /dev/null
+++ b/tests/queries/0_stateless/02412_nlp.reference
@@ -0,0 +1,15 @@
+wolf
+dog
+look
+take
+import
+tokenize
+fly
+['important','big','critical','crucial','essential']
+['happy','cheerful','delighted','ecstatic']
+['however','nonetheless','but','yet']
+['quiz','query','check','exam']
+['важный','большой','высокий','хороший','главный']
+['веселый','счастливый','живой','яркий','смешной'] +['хотя','однако','но','правда'] +['экзамен','испытание','проверка'] diff --git a/tests/queries/0_stateless/02412_nlp.sql b/tests/queries/0_stateless/02412_nlp.sql new file mode 100644 index 00000000000..42c3f108764 --- /dev/null +++ b/tests/queries/0_stateless/02412_nlp.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest + +SET allow_experimental_nlp_functions = 1; + +SELECT lemmatize('en', 'wolves'); +SELECT lemmatize('en', 'dogs'); +SELECT lemmatize('en', 'looking'); +SELECT lemmatize('en', 'took'); +SELECT lemmatize('en', 'imported'); +SELECT lemmatize('en', 'tokenized'); +SELECT lemmatize('en', 'flown'); + +SELECT synonyms('en', 'crucial'); +SELECT synonyms('en', 'cheerful'); +SELECT synonyms('en', 'yet'); +SELECT synonyms('en', 'quiz'); +SELECT synonyms('ru', 'главный'); +SELECT synonyms('ru', 'веселый'); +SELECT synonyms('ru', 'правда'); +SELECT synonyms('ru', 'экзамен'); diff --git a/tests/queries/0_stateless/02413_model_evaluate_smoke.reference b/tests/queries/0_stateless/02413_model_evaluate_smoke.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02413_model_evaluate_smoke.sql b/tests/queries/0_stateless/02413_model_evaluate_smoke.sql new file mode 100644 index 00000000000..3b20067abfe --- /dev/null +++ b/tests/queries/0_stateless/02413_model_evaluate_smoke.sql @@ -0,0 +1,2 @@ +-- This model does not exist: +SELECT modelEvaluate('hello', 1, 2, 3); -- { serverError 36 } diff --git a/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference new file mode 100644 index 00000000000..bb8c8c2228a --- /dev/null +++ b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.reference @@ -0,0 +1,22 @@ +cluster +clusterAllReplicas +dictionary +executable +file +format +generateRandom +input +jdbc +meilisearch +merge +mongodb +null +numbers +numbers_mt +odbc +remote +remoteSecure +url +values +view +viewIfPermitted diff --git a/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.sql b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.sql new file mode 100644 index 00000000000..5d43ec6f0c2 --- /dev/null +++ b/tests/queries/0_stateless/02414_all_new_table_functions_must_be_documented.sql @@ -0,0 +1,6 @@ +-- This outputs the list of undocumented table functions. No new items in the list should appear. +-- Please help shorten this list down to zero elements. 
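+-- The check below keys off the `description` column of system.table_functions:
+-- anything shorter than 10 characters is treated as undocumented. A hand check
+-- for a single entry, for illustration only (`remote` is just an example taken
+-- from the reference list):
+--   SELECT name, description FROM system.table_functions WHERE name = 'remote';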
+SELECT name FROM system.table_functions WHERE length(description) < 10 +AND name NOT IN ( + 'cosn', 'hdfs', 'hdfsCluster', 'hive', 'mysql', 'postgresql', 's3', 's3Cluster', 'sqlite' -- these functions are not enabled in fast test +) ORDER BY name; diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference new file mode 100644 index 00000000000..cbd92d0e8f4 --- /dev/null +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -0,0 +1,952 @@ +CAST +CRC32 +CRC32IEEE +CRC64 +DATE +FQDN +IPv4CIDRToRange +IPv4NumToString +IPv4NumToStringClassC +IPv4StringToNum +IPv4StringToNumOrDefault +IPv4StringToNumOrNull +IPv4ToIPv6 +IPv6CIDRToRange +IPv6NumToString +IPv6StringToNum +IPv6StringToNumOrDefault +IPv6StringToNumOrNull +JSONExtract +JSONExtractArrayRaw +JSONExtractBool +JSONExtractFloat +JSONExtractInt +JSONExtractKeys +JSONExtractKeysAndValues +JSONExtractKeysAndValuesRaw +JSONExtractRaw +JSONExtractString +JSONExtractUInt +JSONHas +JSONKey +JSONLength +JSONType +JSON_EXISTS +JSON_QUERY +JSON_VALUE +L1Distance +L1Norm +L1Normalize +L2Distance +L2Norm +L2Normalize +L2SquaredDistance +L2SquaredNorm +LinfDistance +LinfNorm +LinfNormalize +LpDistance +LpNorm +LpNormalize +MACNumToString +MACStringToNum +MACStringToOUI +URLHash +URLHierarchy +URLPathHierarchy +UUIDNumToString +UUIDStringToNum +_CAST +__bitBoolMaskAnd +__bitBoolMaskOr +__bitSwapLastTwo +__bitWrapperFunc +__getScalar +abs +accurateCast +accurateCastOrDefault +accurateCastOrNull +acos +acosh +addDays +addHours +addMicroseconds +addMilliseconds +addMinutes +addMonths +addNanoseconds +addQuarters +addSeconds +addWeeks +addYears +addressToLine +addressToLineWithInlines +addressToSymbol +alphaTokens +and +appendTrailingCharIfAbsent +array +arrayAUC +arrayAll +arrayAvg +arrayCompact +arrayConcat +arrayCount +arrayCumSum +arrayCumSumNonNegative +arrayDifference +arrayDistinct +arrayElement +arrayEnumerate +arrayEnumerateDense +arrayEnumerateDenseRanked +arrayEnumerateUniq +arrayEnumerateUniqRanked +arrayExists +arrayFill +arrayFilter +arrayFirst +arrayFirstIndex +arrayFirstOrNull +arrayFlatten +arrayIntersect +arrayJoin +arrayLast +arrayLastIndex +arrayLastOrNull +arrayMap +arrayMax +arrayMin +arrayPopBack +arrayPopFront +arrayProduct +arrayPushBack +arrayPushFront +arrayReduce +arrayReduceInRanges +arrayResize +arrayReverse +arrayReverseFill +arrayReverseSort +arrayReverseSplit +arraySlice +arraySort +arraySplit +arrayStringConcat +arraySum +arrayUniq +arrayWithConstant +arrayZip +asinh +assumeNotNull +atan +atan2 +atanh +bar +base58Decode +base58Encode +basename +bin +bitAnd +bitCount +bitHammingDistance +bitNot +bitOr +bitPositionsToArray +bitRotateLeft +bitRotateRight +bitShiftLeft +bitShiftRight +bitSlice +bitTest +bitTestAll +bitTestAny +bitXor +bitmapAnd +bitmapAndCardinality +bitmapAndnot +bitmapAndnotCardinality +bitmapBuild +bitmapCardinality +bitmapContains +bitmapHasAll +bitmapHasAny +bitmapMax +bitmapMin +bitmapOr +bitmapOrCardinality +bitmapSubsetInRange +bitmapSubsetLimit +bitmapToArray +bitmapTransform +bitmapXor +bitmapXorCardinality +bitmaskToArray +bitmaskToList +blockNumber +blockSerializedSize +blockSize +buildId +byteSize +caseWithExpr +caseWithExpression +caseWithoutExpr +caseWithoutExpression +cbrt +ceil +char +cityHash64 +coalesce +concat +concatAssumeInjective +connectionId +cos +cosh +cosineDistance +countDigits +countEqual +countMatches +countMatchesCaseInsensitive 
+countSubstrings +countSubstringsCaseInsensitive +countSubstringsCaseInsensitiveUTF8 +currentDatabase +currentProfiles +currentRoles +currentUser +cutFragment +cutIPv6 +cutQueryString +cutQueryStringAndFragment +cutToFirstSignificantSubdomain +cutToFirstSignificantSubdomainCustom +cutToFirstSignificantSubdomainCustomWithWWW +cutToFirstSignificantSubdomainWithWWW +cutURLParameter +cutWWW +dateDiff +dateName +dateTime64ToSnowflake +dateTimeToSnowflake +dateTrunc +decodeURLComponent +decodeURLFormComponent +decodeXMLComponent +defaultProfiles +defaultRoles +defaultValueOfArgumentType +defaultValueOfTypeName +degrees +demangle +detectCharset +detectLanguageUnknown +detectProgrammingLanguage +detectTonality +dictGet +dictGetChildren +dictGetDate +dictGetDateOrDefault +dictGetDateTime +dictGetDateTimeOrDefault +dictGetDescendants +dictGetFloat32 +dictGetFloat32OrDefault +dictGetFloat64 +dictGetFloat64OrDefault +dictGetHierarchy +dictGetInt16 +dictGetInt16OrDefault +dictGetInt32 +dictGetInt32OrDefault +dictGetInt64 +dictGetInt64OrDefault +dictGetInt8 +dictGetInt8OrDefault +dictGetOrDefault +dictGetOrNull +dictGetString +dictGetStringOrDefault +dictGetUInt16 +dictGetUInt16OrDefault +dictGetUInt32 +dictGetUInt32OrDefault +dictGetUInt64 +dictGetUInt64OrDefault +dictGetUInt8 +dictGetUInt8OrDefault +dictGetUUID +dictGetUUIDOrDefault +dictHas +dictIsIn +divide +domain +domainWithoutWWW +dotProduct +dumpColumnStructure +e +empty +emptyArrayDate +emptyArrayDateTime +emptyArrayFloat32 +emptyArrayFloat64 +emptyArrayInt16 +emptyArrayInt32 +emptyArrayInt64 +emptyArrayInt8 +emptyArrayString +emptyArrayToSingle +emptyArrayUInt16 +emptyArrayUInt32 +emptyArrayUInt64 +emptyArrayUInt8 +enabledProfiles +enabledRoles +encodeURLComponent +encodeURLFormComponent +encodeXMLComponent +endsWith +equals +erf +erfc +errorCodeToName +evalMLMethod +exp +exp10 +exp2 +extract +extractAll +extractAllGroupsHorizontal +extractAllGroupsVertical +extractGroups +extractTextFromHTML +extractURLParameter +extractURLParameterNames +extractURLParameters +farmFingerprint64 +farmHash64 +file +filesystemAvailable +filesystemCapacity +filesystemFree +finalizeAggregation +firstSignificantSubdomain +firstSignificantSubdomainCustom +flattenTuple +floor +format +formatDateTime +formatReadableQuantity +formatReadableSize +formatReadableTimeDelta +formatRow +formatRowNoNewline +fragment +fromModifiedJulianDay +fromModifiedJulianDayOrNull +fromUnixTimestamp +fromUnixTimestamp64Micro +fromUnixTimestamp64Milli +fromUnixTimestamp64Nano +fullHostName +fuzzBits +gccMurmurHash +gcd +generateUUIDv4 +geoDistance +geohashDecode +geohashEncode +geohashesInBox +getMacro +getOSKernelVersion +getServerPort +getSetting +getSizeOfEnumType +getTypeSerializationStreams +globalIn +globalInIgnoreSet +globalNotIn +globalNotInIgnoreSet +globalNotNullIn +globalNotNullInIgnoreSet +globalNullIn +globalNullInIgnoreSet +globalVariable +greatCircleAngle +greatCircleDistance +greater +greaterOrEquals +greatest +has +hasAll +hasAny +hasColumnInTable +hasSubstr +hasThreadFuzzer +hasToken +hasTokenCaseInsensitive +hashid +hex +hiveHash +hop +hopEnd +hopStart +hostName +hypot +identity +if +ifNotFinite +ifNull +ignore +ilike +in +inIgnoreSet +indexHint +indexOf +initialQueryID +initializeAggregation +intDiv +intDivOrZero +intExp10 +intExp2 +intHash32 +intHash64 +isConstant +isDecimalOverflow +isFinite +isIPAddressInRange +isIPv4String +isIPv6String +isInfinite +isNaN +isNotNull +isNull +isNullable +isValidJSON +isValidUTF8 +isZeroOrNull +javaHash +javaHashUTF16LE +joinGet 
+joinGetOrNull +jumpConsistentHash +kostikConsistentHash +lcm +least +left +leftPad +leftPadUTF8 +leftUTF8 +lengthUTF8 +less +lessOrEquals +lgamma +like +log +log10 +log1p +log2 +logTrace +lowCardinalityIndices +lowCardinalityKeys +lower +lowerUTF8 +makeDate +makeDate32 +makeDateTime +makeDateTime64 +map +mapAdd +mapApply +mapContains +mapContainsKeyLike +mapExtractKeyLike +mapFilter +mapKeys +mapPopulateSeries +mapSubtract +mapUpdate +mapValues +match +materialize +max2 +meiliMatch +metroHash64 +min2 +minSampleSizeContinous +minSampleSizeConversion +minus +modelEvaluate +modulo +moduloLegacy +moduloOrZero +monthName +multiFuzzyMatchAllIndices +multiFuzzyMatchAny +multiFuzzyMatchAnyIndex +multiIf +multiMatchAllIndices +multiMatchAny +multiMatchAnyIndex +multiSearchAllPositions +multiSearchAllPositionsCaseInsensitive +multiSearchAllPositionsCaseInsensitiveUTF8 +multiSearchAllPositionsUTF8 +multiSearchAny +multiSearchAnyCaseInsensitive +multiSearchAnyCaseInsensitiveUTF8 +multiSearchAnyUTF8 +multiSearchFirstIndex +multiSearchFirstIndexCaseInsensitive +multiSearchFirstIndexCaseInsensitiveUTF8 +multiSearchFirstIndexUTF8 +multiSearchFirstPosition +multiSearchFirstPositionCaseInsensitive +multiSearchFirstPositionCaseInsensitiveUTF8 +multiSearchFirstPositionUTF8 +multiply +murmurHash2_32 +murmurHash2_64 +murmurHash3_128 +murmurHash3_32 +murmurHash3_64 +negate +neighbor +netloc +ngramDistance +ngramDistanceCaseInsensitive +ngramDistanceCaseInsensitiveUTF8 +ngramDistanceUTF8 +ngramMinHash +ngramMinHashArg +ngramMinHashArgCaseInsensitive +ngramMinHashArgCaseInsensitiveUTF8 +ngramMinHashArgUTF8 +ngramMinHashCaseInsensitive +ngramMinHashCaseInsensitiveUTF8 +ngramMinHashUTF8 +ngramSearch +ngramSearchCaseInsensitive +ngramSearchCaseInsensitiveUTF8 +ngramSearchUTF8 +ngramSimHash +ngramSimHashCaseInsensitive +ngramSimHashCaseInsensitiveUTF8 +ngramSimHashUTF8 +ngrams +normalizeQuery +normalizeQueryKeepNames +normalizedQueryHash +normalizedQueryHashKeepNames +not +notEmpty +notEquals +notILike +notIn +notInIgnoreSet +notLike +notNullIn +notNullInIgnoreSet +now +now64 +nowInBlock +nullIf +nullIn +nullInIgnoreSet +or +parseDateTime32BestEffort +parseDateTime32BestEffortOrNull +parseDateTime32BestEffortOrZero +parseDateTime64BestEffort +parseDateTime64BestEffortOrNull +parseDateTime64BestEffortOrZero +parseDateTime64BestEffortUS +parseDateTime64BestEffortUSOrNull +parseDateTime64BestEffortUSOrZero +parseDateTimeBestEffort +parseDateTimeBestEffortOrNull +parseDateTimeBestEffortOrZero +parseDateTimeBestEffortUS +parseDateTimeBestEffortUSOrNull +parseDateTimeBestEffortUSOrZero +parseTimeDelta +partitionId +path +pathFull +pi +plus +pointInEllipses +pointInPolygon +polygonAreaCartesian +polygonAreaSpherical +polygonConvexHullCartesian +polygonPerimeterCartesian +polygonPerimeterSpherical +polygonsDistanceCartesian +polygonsDistanceSpherical +polygonsEqualsCartesian +polygonsIntersectionCartesian +polygonsIntersectionSpherical +polygonsSymDifferenceCartesian +polygonsSymDifferenceSpherical +polygonsUnionCartesian +polygonsUnionSpherical +polygonsWithinCartesian +polygonsWithinSpherical +port +position +positionCaseInsensitive +positionCaseInsensitiveUTF8 +positionUTF8 +pow +proportionsZTest +protocol +queryID +queryString +queryStringAndFragment +radians +rand +rand64 +randConstant +randomFixedString +randomPrintableASCII +randomString +randomStringUTF8 +range +readWKTMultiPolygon +readWKTPoint +readWKTPolygon +readWKTRing +regexpQuoteMeta +regionHierarchy +regionIn +regionToArea +regionToCity +regionToContinent 
+regionToCountry +regionToDistrict +regionToName +regionToPopulation +regionToTopContinent +reinterpret +reinterpretAsDate +reinterpretAsDateTime +reinterpretAsFixedString +reinterpretAsFloat32 +reinterpretAsFloat64 +reinterpretAsInt128 +reinterpretAsInt16 +reinterpretAsInt256 +reinterpretAsInt32 +reinterpretAsInt64 +reinterpretAsInt8 +reinterpretAsString +reinterpretAsUInt128 +reinterpretAsUInt16 +reinterpretAsUInt256 +reinterpretAsUInt32 +reinterpretAsUInt64 +reinterpretAsUInt8 +reinterpretAsUUID +repeat +replaceAll +replaceOne +replaceRegexpAll +replaceRegexpOne +replicate +reverse +reverseUTF8 +revision +right +rightPad +rightPadUTF8 +rightUTF8 +round +roundAge +roundBankers +roundDown +roundDuration +roundToExp2 +rowNumberInAllBlocks +rowNumberInBlock +runningAccumulate +runningConcurrency +runningDifference +runningDifferenceStartingWithFirstValue +serverUUID +shardCount +shardNum +showCertificate +sigmoid +sign +simpleJSONExtractBool +simpleJSONExtractFloat +simpleJSONExtractInt +simpleJSONExtractRaw +simpleJSONExtractString +simpleJSONExtractUInt +simpleJSONHas +sin +sinh +sipHash128 +sipHash64 +sleep +sleepEachRow +snowflakeToDateTime +snowflakeToDateTime64 +splitByChar +splitByNonAlpha +splitByRegexp +splitByString +splitByWhitespace +sqrt +startsWith +subBitmap +substring +substringUTF8 +subtractDays +subtractHours +subtractMicroseconds +subtractMilliseconds +subtractMinutes +subtractMonths +subtractNanoseconds +subtractQuarters +subtractSeconds +subtractWeeks +subtractYears +svg +tan +tanh +tcpPort +tgamma +throwIf +tid +timeSlot +timeSlots +timezone +timezoneOf +timezoneOffset +toBool +toColumnTypeName +toDate +toDate32 +toDate32OrDefault +toDate32OrNull +toDate32OrZero +toDateOrDefault +toDateOrNull +toDateOrZero +toDateTime +toDateTime32 +toDateTime64 +toDateTime64OrDefault +toDateTime64OrNull +toDateTime64OrZero +toDateTimeOrDefault +toDateTimeOrNull +toDateTimeOrZero +toDayOfMonth +toDayOfWeek +toDayOfYear +toDecimal128 +toDecimal128OrDefault +toDecimal128OrNull +toDecimal128OrZero +toDecimal256 +toDecimal256OrDefault +toDecimal256OrNull +toDecimal256OrZero +toDecimal32 +toDecimal32OrDefault +toDecimal32OrNull +toDecimal32OrZero +toDecimal64 +toDecimal64OrDefault +toDecimal64OrNull +toDecimal64OrZero +toFixedString +toFloat32 +toFloat32OrDefault +toFloat32OrNull +toFloat32OrZero +toFloat64 +toFloat64OrDefault +toFloat64OrNull +toFloat64OrZero +toHour +toIPv4 +toIPv4OrDefault +toIPv4OrNull +toIPv6 +toIPv6OrDefault +toIPv6OrNull +toISOWeek +toISOYear +toInt128 +toInt128OrDefault +toInt128OrNull +toInt128OrZero +toInt16 +toInt16OrDefault +toInt16OrNull +toInt16OrZero +toInt256 +toInt256OrDefault +toInt256OrNull +toInt256OrZero +toInt32 +toInt32OrDefault +toInt32OrNull +toInt32OrZero +toInt64 +toInt64OrDefault +toInt64OrNull +toInt64OrZero +toInt8 +toInt8OrDefault +toInt8OrNull +toInt8OrZero +toIntervalDay +toIntervalHour +toIntervalMicrosecond +toIntervalMillisecond +toIntervalMinute +toIntervalMonth +toIntervalNanosecond +toIntervalQuarter +toIntervalSecond +toIntervalWeek +toIntervalYear +toJSONString +toLastDayOfMonth +toLowCardinality +toMinute +toModifiedJulianDay +toModifiedJulianDayOrNull +toMonday +toMonth +toNullable +toQuarter +toRelativeDayNum +toRelativeHourNum +toRelativeMinuteNum +toRelativeMonthNum +toRelativeQuarterNum +toRelativeSecondNum +toRelativeWeekNum +toRelativeYearNum +toSecond +toStartOfDay +toStartOfFifteenMinutes +toStartOfFiveMinutes +toStartOfHour +toStartOfISOYear +toStartOfInterval +toStartOfMicrosecond +toStartOfMillisecond +toStartOfMinute 
+toStartOfMonth +toStartOfNanosecond +toStartOfQuarter +toStartOfSecond +toStartOfTenMinutes +toStartOfWeek +toStartOfYear +toString +toStringCutToZero +toTime +toTimezone +toTypeName +toUInt128 +toUInt128OrNull +toUInt128OrZero +toUInt16 +toUInt16OrDefault +toUInt16OrNull +toUInt16OrZero +toUInt256 +toUInt256OrDefault +toUInt256OrNull +toUInt256OrZero +toUInt32 +toUInt32OrDefault +toUInt32OrNull +toUInt32OrZero +toUInt64 +toUInt64OrDefault +toUInt64OrNull +toUInt64OrZero +toUInt8 +toUInt8OrDefault +toUInt8OrNull +toUInt8OrZero +toUUID +toUUIDOrDefault +toUUIDOrNull +toUUIDOrZero +toUnixTimestamp +toUnixTimestamp64Micro +toUnixTimestamp64Milli +toUnixTimestamp64Nano +toValidUTF8 +toWeek +toYYYYMM +toYYYYMMDD +toYYYYMMDDhhmmss +toYear +toYearWeek +today +tokens +topLevelDomain +transactionID +transactionLatestSnapshot +transactionOldestSnapshot +transform +translate +translateUTF8 +trimBoth +trimLeft +trimRight +trunc +tumble +tumbleEnd +tumbleStart +tuple +tupleDivide +tupleDivideByNumber +tupleElement +tupleHammingDistance +tupleMinus +tupleMultiply +tupleMultiplyByNumber +tupleNegate +tuplePlus +tupleToNameValuePairs +unbin +unhex +upper +upperUTF8 +uptime +validateNestedArraySizes +version +visibleWidth +windowID +wkt +wordShingleMinHash +wordShingleMinHashArg +wordShingleMinHashArgCaseInsensitive +wordShingleMinHashArgCaseInsensitiveUTF8 +wordShingleMinHashArgUTF8 +wordShingleMinHashCaseInsensitive +wordShingleMinHashCaseInsensitiveUTF8 +wordShingleMinHashUTF8 +wordShingleSimHash +wordShingleSimHashCaseInsensitive +wordShingleSimHashCaseInsensitiveUTF8 +wordShingleSimHashUTF8 +wyHash64 +xor +xxHash32 +xxHash64 +yesterday +zookeeperSessionUptime diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql new file mode 100644 index 00000000000..3202a28cdd0 --- /dev/null +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql @@ -0,0 +1,19 @@ +-- This outputs the list of undocumented functions. No new items in the list should appear. +-- Please help shorten this list down to zero elements. 
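+-- Same approach as the table-function check: system.functions exposes a
+-- `description` column, and entries shorter than 10 characters count as
+-- undocumented; aliases (non-empty `alias_to`) and non-system functions are
+-- excluded below. A hand check for one function, for illustration only
+-- (`toStartOfDay` is just an example taken from the reference list):
+--   SELECT name, alias_to, description FROM system.functions WHERE name = 'toStartOfDay';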
+SELECT name FROM system.functions WHERE NOT is_aggregate AND origin = 'System' AND alias_to = '' AND length(description) < 10 +AND name NOT IN ( + 'MD4', 'MD5', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512', 'halfMD5', + 'aes_decrypt_mysql', 'aes_encrypt_mysql', 'decrypt', 'encrypt', + 'base64Decode', 'base64Encode', 'tryBase64Decode', + 'convertCharset', + 'detectLanguage', 'detectLanguageMixed', + 'geoToH3', + 'h3CellAreaM2', 'h3CellAreaRads2', 'h3Distance', 'h3EdgeAngle', 'h3EdgeLengthKm', 'h3EdgeLengthM', 'h3ExactEdgeLengthKm', 'h3ExactEdgeLengthM', 'h3ExactEdgeLengthRads', 'h3GetBaseCell', + 'h3GetDestinationIndexFromUnidirectionalEdge', 'h3GetFaces', 'h3GetIndexesFromUnidirectionalEdge', 'h3GetOriginIndexFromUnidirectionalEdge', 'h3GetPentagonIndexes', 'h3GetRes0Indexes', + 'h3GetResolution', 'h3GetUnidirectionalEdge', 'h3GetUnidirectionalEdgeBoundary', 'h3GetUnidirectionalEdgesFromHexagon', 'h3HexAreaKm2', 'h3HexAreaM2', 'h3HexRing', 'h3IndexesAreNeighbors', + 'h3IsPentagon', 'h3IsResClassIII', 'h3IsValid', 'h3Line', 'h3NumHexagons', 'h3PointDistKm', 'h3PointDistM', 'h3PointDistRads', 'h3ToCenterChild', 'h3ToChildren', 'h3ToGeo', + 'h3ToGeoBoundary', 'h3ToParent', 'h3ToString', 'h3UnidirectionalEdgeIsValid', 'h3kRing', 'stringToH3', + 'geoToS2', 's2CapContains', 's2CapUnion', 's2CellsIntersect', 's2GetNeighbors', 's2RectAdd', 's2RectContains', 's2RectIntersection', 's2RectUnion', 's2ToGeo', + 'normalizeUTF8NFC', 'normalizeUTF8NFD', 'normalizeUTF8NFKC', 'normalizeUTF8NFKD', + 'lemmatize', 'tokenize', 'stem', 'synonyms' +) ORDER BY name; diff --git a/tests/queries/0_stateless/add-test b/tests/queries/0_stateless/add-test new file mode 100755 index 00000000000..2173a4d8cc2 --- /dev/null +++ b/tests/queries/0_stateless/add-test @@ -0,0 +1,28 @@ +#!/bin/bash + +if [ -z "$1" ]; then + echo "Helper script to create empty test and reference files and assign a new number." 
+ echo "Usage: $0 " + exit 1 +fi + +TESTS_PATH=$(dirname ${BASH_SOURCE[0]}) +set -ue + +# shellcheck disable=SC2010 +LAST_TEST_NO=$(ls -1 ${TESTS_PATH} | grep -P -o '^\d+' | sort -nr | head -1) + +# remove leading zeros, increment and add padding zeros to 5 digits +NEW_TEST_NO=$(printf "%05d\n" $((10#$LAST_TEST_NO + 1))) + +# if extension is not provided, use `.sql` +FILENAME="${1}" +FILEEXT="sql" +if [[ $1 == *.* ]] ; then + FILENAME="${1%.*}" + FILEEXT="${1##*.}" +fi + +set -x +touch ${TESTS_PATH}/${NEW_TEST_NO}_${FILENAME}.${FILEEXT} +touch ${TESTS_PATH}/${NEW_TEST_NO}_${FILENAME}.reference diff --git a/tests/queries/0_stateless/filesystem_cache_queries/02242_system_filesystem_cache_log_table.queries b/tests/queries/0_stateless/filesystem_cache_queries/02242_system_filesystem_cache_log_table.queries index b6b1291f83f..386a1792ea4 100644 --- a/tests/queries/0_stateless/filesystem_cache_queries/02242_system_filesystem_cache_log_table.queries +++ b/tests/queries/0_stateless/filesystem_cache_queries/02242_system_filesystem_cache_log_table.queries @@ -12,8 +12,8 @@ INSERT INTO test SELECT number, toString(number) FROM numbers(100000); SELECT 2240, '_storagePolicy', * FROM test FORMAT Null; SYSTEM FLUSH LOGS; -SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2240%_storagePolicy%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1); +SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2240%_storagePolicy%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1) ORDER BY file_segment_range, read_type; SELECT 2241, '_storagePolicy', * FROM test FORMAT Null; SYSTEM FLUSH LOGS; -SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2241%_storagePolicy%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1); +SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2241%_storagePolicy%' AND current_database = currentDatabase() AND type = 'QueryFinish' ORDER BY event_time desc LIMIT 1) ORDER BY file_segment_range, read_type; diff --git a/tests/queries/0_stateless/filesystem_cache_queries/02313_filesystem_cache_seeks.queries b/tests/queries/0_stateless/filesystem_cache_queries/02313_filesystem_cache_seeks.queries index 389b9dfaa99..7f343fb83bd 100644 --- a/tests/queries/0_stateless/filesystem_cache_queries/02313_filesystem_cache_seeks.queries +++ b/tests/queries/0_stateless/filesystem_cache_queries/02313_filesystem_cache_seeks.queries @@ -1,4 +1,5 @@ SYSTEM DROP FILESYSTEM CACHE; +SET send_logs_level = 'fatal'; -- Ignore retriable errors like "AWSClient: Failed to make request" DROP TABLE IF EXISTS test_02313; CREATE TABLE test_02313 (id Int32, val String) diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index 8da21a3fdbe..72d51c48656 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -5,6 +5,7 @@ function try_sync_replicas() { table_name_prefix=$1 + time_left=$2 readarray -t empty_partitions_arr < <(${CLICKHOUSE_CLIENT} -q \ "SELECT DISTINCT substr(new_part_name, 1, 
position(new_part_name, '_') - 1) AS partition_id
@@ -29,7 +30,7 @@ function try_sync_replicas()
     for t in "${tables_arr[@]}"
     do
         # The size of log may be big, so increase timeout.
-        $CLICKHOUSE_CLIENT --receive_timeout 300 -q "SYSTEM SYNC REPLICA $t" || ($CLICKHOUSE_CLIENT -q \
+        $CLICKHOUSE_CLIENT --receive_timeout $time_left -q "SYSTEM SYNC REPLICA $t" || ($CLICKHOUSE_CLIENT -q \
             "select 'sync failed, queue:', * from system.replication_queue where database=currentDatabase() and table='$t' order by database, table, node_name" && exit 1) &
         pids[${i}]=$!
         i=$((i + 1))
@@ -48,13 +49,14 @@ function check_replication_consistency()
     # Wait for all queries to finish (query may still be running if thread is killed by timeout)
     num_tries=0
     while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE current_database=currentDatabase() AND query LIKE '%$table_name_prefix%'") -ne 1 ]]; do
-        sleep 0.5;
+        sleep 1;
         num_tries=$((num_tries+1))
-        if [ $num_tries -eq 200 ]; then
+        if [ $num_tries -eq 250 ]; then
             $CLICKHOUSE_CLIENT -q "SELECT * FROM system.processes WHERE current_database=currentDatabase() AND query LIKE '%$table_name_prefix%' FORMAT Vertical"
             break
         fi
     done
+    time_left=$((300 - num_tries))
 
     # Do not check anything if all replicas are readonly,
     # because in this case all replicas are probably lost (it may happen and it's not a bug)
@@ -78,7 +80,7 @@ function check_replication_consistency()
     # SYNC REPLICA is not enough if some MUTATE_PARTs are not assigned yet
     wait_for_all_mutations "$table_name_prefix%"
 
-    try_sync_replicas "$table_name_prefix" || exit 1
+    try_sync_replicas "$table_name_prefix" "$time_left" || exit 1
 
     res=$($CLICKHOUSE_CLIENT -q \
         "SELECT
diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt
index 3e09b3ecd64..a79982bbd61 100644
--- a/utils/CMakeLists.txt
+++ b/utils/CMakeLists.txt
@@ -23,7 +23,6 @@ if (NOT DEFINED ENABLE_UTILS OR ENABLE_UTILS)
     add_subdirectory (iotest)
     add_subdirectory (corrector_utf8)
     add_subdirectory (zookeeper-cli)
-    add_subdirectory (zookeeper-test)
     add_subdirectory (zookeeper-dump-tree)
     add_subdirectory (zookeeper-remove-by-list)
     add_subdirectory (zookeeper-create-entry-to-download-part)
@@ -35,7 +34,6 @@ if (NOT DEFINED ENABLE_UTILS OR ENABLE_UTILS)
     add_subdirectory (wal-dump)
     add_subdirectory (check-mysql-binlog)
     add_subdirectory (keeper-bench)
-    add_subdirectory (graphite-rollup)
 
     if (TARGET ch_contrib::nuraft)
         add_subdirectory (keeper-data-dumper)
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt
index b6778fce0f0..fa178e764da 100644
--- a/utils/check-style/aspell-ignore/en/aspell-dict.txt
+++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt
@@ -406,6 +406,7 @@ requestor
 resultset
 rethrow
 risc
+riscv
 ro
 rocksdb
 rowNumberInBlock
diff --git a/utils/check-style/check-style b/utils/check-style/check-style
index dac016003d5..a4810701dee 100755
--- a/utils/check-style/check-style
+++ b/utils/check-style/check-style
@@ -86,6 +86,7 @@ EXTERN_TYPES_EXCLUDES=(
     CurrentMetrics::Increment
     CurrentMetrics::Metric
     CurrentMetrics::values
+    CurrentMetrics::Value
 )
 for extern_type in ${!EXTERN_TYPES[@]}; do
     type_of_extern=${EXTERN_TYPES[$extern_type]}
diff --git a/utils/graphite-rollup/CMakeLists.txt b/utils/graphite-rollup/CMakeLists.txt
deleted file mode 100644
index 4b05a18bac5..00000000000
--- a/utils/graphite-rollup/CMakeLists.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-clickhouse_add_executable(graphite-rollup-bench graphite-rollup-bench.cpp)
-target_link_libraries(
-    graphite-rollup-bench
- PRIVATE - clickhouse_storages_system - clickhouse_aggregate_functions - clickhouse_common_config - dbms -) -target_include_directories( - graphite-rollup-bench - SYSTEM PRIVATE - ${ClickHouse_SOURCE_DIR}/src ${CMAKE_BINARY_DIR}/src - ${ClickHouse_SOURCE_DIR}/base ${ClickHouse_SOURCE_DIR}/base/pcg-random - ${CMAKE_BINARY_DIR}/src/Core/include - ${POCO_INCLUDE_DIR} - ${ClickHouse_SOURCE_DIR}/contrib/double-conversion ${ClickHouse_SOURCE_DIR}/contrib/dragonbox/include - ${ClickHouse_SOURCE_DIR}/contrib/fmtlib/include - ${ClickHouse_SOURCE_DIR}/contrib/cityhash102/include - ${RE2_INCLUDE_DIR} ${CMAKE_BINARY_DIR}/contrib/re2-cmake -) - -target_compile_definitions(graphite-rollup-bench PRIVATE RULES_DIR="${CMAKE_CURRENT_SOURCE_DIR}") diff --git a/utils/graphite-rollup/graphite-rollup-bench.cpp b/utils/graphite-rollup/graphite-rollup-bench.cpp deleted file mode 100644 index a46d214edbf..00000000000 --- a/utils/graphite-rollup/graphite-rollup-bench.cpp +++ /dev/null @@ -1,130 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace DB; - -static SharedContextHolder shared_context = Context::createShared(); - -auto loadMetrics(const std::string & metrics_file) -{ - std::vector metrics; - ReadBufferFromFile in(metrics_file); - String line; - - while (!in.eof()) - { - readEscapedStringUntilEOL(line, in); - if (!in.eof()) - { - ++in.position(); - } - if (!line.empty() && line.back() == '\n') - { - line.pop_back(); - } - if (!line.empty()) - { - metrics.emplace_back(line); - } - } - - return metrics; -} - -ConfigProcessor::LoadedConfig loadConfiguration(const std::string & config_path) -{ - ConfigProcessor config_processor(config_path, true, true); - ConfigProcessor::LoadedConfig config = config_processor.loadConfig(false); - return config; -} - -void bench(const std::string & config_path, const std::string & metrics_file, size_t n, bool verbose) -{ - auto config = loadConfiguration(config_path); - - auto context = Context::createGlobal(shared_context.get()); - context->setConfig(config.configuration.get()); - - Graphite::Params params; - setGraphitePatternsFromConfig(context, "graphite_rollup", params); - - auto metrics = loadMetrics(metrics_file); - - std::vector durations(metrics.size()); - size_t j, i; - for (j = 0; j < n; j++) - { - for (i = 0; i < metrics.size(); i++) - { - auto start = std::chrono::high_resolution_clock::now(); - - auto rule = DB::Graphite::selectPatternForPath(params, metrics[i]); - (void)rule; - - auto end = std::chrono::high_resolution_clock::now(); - double duration = (duration_cast>(end - start)).count() * 1E9; - durations[i] += duration; - - if (j == 0 && verbose) - { - std::cout << metrics[i] << ": rule with regexp '" << rule.second->regexp_str << "' found\n"; - } - } - } - - for (i = 0; i < metrics.size(); i++) - { - std::cout << metrics[i] << " " << durations[i] / n << " ns\n"; - } -} - -int main(int argc, char ** argv) -{ - registerAggregateFunctions(); - - std::string config_file, metrics_file; - - using namespace std::literals; - - std::string config_default = RULES_DIR + "/rollup.xml"s; - std::string metrics_default = RULES_DIR + "/metrics.txt"s; - - namespace po = boost::program_options; - po::variables_map vm; - - po::options_description desc; - desc.add_options()("help,h", "produce help")( - "config,c", po::value()->default_value(config_default), "XML config with rollup rules")( - "metrics,m", 
po::value()->default_value(metrics_default), "metrcis files (one metric per line) for run benchmark")( - "verbose,V", po::bool_switch()->default_value(false), "verbose output (print found rule)"); - - po::parsed_options parsed = po::command_line_parser(argc, argv).options(desc).run(); - po::store(parsed, vm); - po::notify(vm); - - if (vm.count("help")) - { - std::cout << desc << '\n'; - exit(1); - } - - bench(vm["config"].as(), vm["metrics"].as(), 10000, vm["verbose"].as()); - - return 0; -} diff --git a/utils/graphite-rollup/metrics.txt b/utils/graphite-rollup/metrics.txt deleted file mode 100644 index 199c3791310..00000000000 --- a/utils/graphite-rollup/metrics.txt +++ /dev/null @@ -1,11 +0,0 @@ -test.sum -sum?env=test&tag=Fake3 -test.max -max?env=test&tag=Fake4 -test.min -min?env=test&tag=Fake5 -fake5?env=test&tag=Fake5 -test.p95 -p95?env=test&tag=FakeNo -default -default?env=test&tag=FakeNo diff --git a/utils/graphite-rollup/rollup-tag-list.xml b/utils/graphite-rollup/rollup-tag-list.xml deleted file mode 100644 index edab2f16436..00000000000 --- a/utils/graphite-rollup/rollup-tag-list.xml +++ /dev/null @@ -1,167 +0,0 @@ - - - - plain - \.sum$ - sum - - 0 - 60 - - - 86400 - 3600 - - - - tagged - ^((.*)|.)sum\? - sum - - 0 - 60 - - - 86400 - 3600 - - - - plain - \.max$ - max - - 0 - 60 - - - 86400 - 3600 - - - - tagged - ^((.*)|.)max\? - max - - 0 - 60 - - - 86400 - 3600 - - - - plain - \.min$ - min - - 0 - 60 - - - 86400 - 3600 - - - - tagged - ^((.*)|.)min\? - min - - 0 - 60 - - - 86400 - 3600 - - - - plain - \.fake1\..*\.Fake1\. - sum - - - tag_list - fake1;tag=Fake1 - sum - - - plain - \.fake2\..*\.Fake2\. - sum - - - tag_list - fake2;tag=Fake2 - sum - - - plain - \.fake3\..*\.Fake3\. - sum - - - tag_list - fake3;tag=Fake3 - sum - - - plain - \.fake4\..*\.Fake4\. - sum - - - tag_list - fake4;tag=Fake4 - sum - - - plain - \.fake5\..*\.Fake5\. - sum - - - tag_list - fake5;tag=Fake5 - sum - - - plain - \.fake6\..*\.Fake6\. - sum - - - tag_list - fake6;tag=Fake6 - sum - - - plain - \.fake7\..*\.Fake7\. - sum - - - tag_list - fake7;tag=Fake7 - sum - - - avg - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - - diff --git a/utils/graphite-rollup/rollup-typed.xml b/utils/graphite-rollup/rollup-typed.xml deleted file mode 100644 index ace439dba4a..00000000000 --- a/utils/graphite-rollup/rollup-typed.xml +++ /dev/null @@ -1,167 +0,0 @@ - - - - plain - \.sum$ - sum - - 0 - 60 - - - 86400 - 3600 - - - - tagged - ^((.*)|.)sum\? - sum - - 0 - 60 - - - 86400 - 3600 - - - - plain - \.max$ - max - - 0 - 60 - - - 86400 - 3600 - - - - tagged - ^((.*)|.)max\? - max - - 0 - 60 - - - 86400 - 3600 - - - - plain - \.min$ - min - - 0 - 60 - - - 86400 - 3600 - - - - tagged - ^((.*)|.)min\? - min - - 0 - 60 - - - 86400 - 3600 - - - - plain - \.fake1\..*\.Fake1\. - sum - - - tagged - - sum - - - plain - \.fake2\..*\.Fake2\. - sum - - - tagged - - sum - - - plain - \.fake3\..*\.Fake3\. - sum - - - tagged - - sum - - - plain - \.fake4\..*\.Fake4\. - sum - - - tagged - - sum - - - plain - \.fake5\..*\.Fake5\. - sum - - - tagged - - sum - - - plain - \.fake6\..*\.Fake6\. - sum - - - tagged - - sum - - - plain - \.fake7\..*\.Fake7\. - sum - - - tagged - - sum - - - avg - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - - diff --git a/utils/graphite-rollup/rollup.xml b/utils/graphite-rollup/rollup.xml deleted file mode 100644 index 2089605c8bf..00000000000 --- a/utils/graphite-rollup/rollup.xml +++ /dev/null @@ -1,147 +0,0 @@ - - - - \.sum$ - sum - - 0 - 60 - - - 86400 - 3600 - - - - ^((.*)|.)sum\? 
- sum - - 0 - 60 - - - 86400 - 3600 - - - - \.max$ - max - - 0 - 60 - - - 86400 - 3600 - - - - ^((.*)|.)max\? - max - - 0 - 60 - - - 86400 - 3600 - - - - \.min$ - min - - 0 - 60 - - - 86400 - 3600 - - - - ^((.*)|.)min\? - min - - 0 - 60 - - - 86400 - 3600 - - - - \.fake1\..*\.Fake1\. - sum - - - - sum - - - \.fake2\..*\.Fake2\. - sum - - - - sum - - - \.fake3\..*\.Fake3\. - sum - - - - sum - - - \.fake4\..*\.Fake4\. - sum - - - - sum - - - \.fake5\..*\.Fake5\. - sum - - - - sum - - - \.fake6\..*\.Fake6\. - sum - - - - sum - - - \.fake7\..*\.Fake7\. - sum - - - - sum - - - avg - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - - diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 0ab5349b97b..bc676be1888 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,6 +1,10 @@ +v22.8.2.11-lts 2022-08-23 +v22.8.1.2097-lts 2022-08-18 +v22.7.4.16-stable 2022-08-23 v22.7.3.5-stable 2022-08-10 v22.7.2.15-stable 2022-08-03 v22.7.1.2484-stable 2022-07-21 +v22.6.6.16-stable 2022-08-23 v22.6.5.22-stable 2022-08-09 v22.6.4.35-stable 2022-07-25 v22.6.3.35-stable 2022-07-06 diff --git a/utils/self-extracting-executable/compressor.cpp b/utils/self-extracting-executable/compressor.cpp index f40c4725c32..d8b4fdbb038 100644 --- a/utils/self-extracting-executable/compressor.cpp +++ b/utils/self-extracting-executable/compressor.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -76,9 +77,10 @@ int doCompress(char * input, char * output, off_t & in_offset, off_t & out_offse } /// compress data from opened file into output file -int compress(int in_fd, int out_fd, int level, off_t & pointer, const struct stat & info_in) +int compress(int in_fd, int out_fd, int level, off_t & pointer, const struct stat & info_in, uint64_t & compressed_size) { off_t in_offset = 0; + compressed_size = 0; /// mmap files char * input = static_cast(mmap(nullptr, info_in.st_size, PROT_READ, MAP_PRIVATE, in_fd, 0)); @@ -141,6 +143,8 @@ int compress(int in_fd, int out_fd, int level, off_t & pointer, const struct sta return 1; } + uint64_t total_size = 0; + /// Compress data while (in_offset < info_in.st_size) { @@ -171,8 +175,15 @@ int compress(int in_fd, int out_fd, int level, off_t & pointer, const struct sta } pointer += current_block_size; printf("...block compression rate: %.2f%%\n", static_cast(current_block_size) / size * 100); + total_size += size; + compressed_size += current_block_size; current_block_size = 0; } + std::cout << + "Compressed size: " << compressed_size << + ", compression rate: " << std::fixed << std::setprecision(2) << + static_cast(compressed_size) / total_size * 100 << "%" + << std::endl; if (0 != munmap(input, info_in.st_size) || 0 != munmap(output, 2 * max_block_size)) @@ -187,7 +198,7 @@ int compress(int in_fd, int out_fd, int level, off_t & pointer, const struct sta } /// Save Metadata at the end of file -int saveMetaData(char* filenames[], int count, int output_fd, const MetaData& metadata, +int saveMetaData(const char* filenames[], int count, int output_fd, const MetaData& metadata, FileData* files_data, size_t pointer, size_t sum_file_size) { /// Allocate memory for metadata @@ -228,21 +239,36 @@ int saveMetaData(char* filenames[], int count, int output_fd, const MetaData& me } /// Fills metadata and calls compression function for each file -int compressFiles(char* filenames[], int count, int output_fd, int level, const struct stat& info_out) +int compressFiles(const char* out_name, const char* 
exec, char* filenames[], int count, int output_fd, int level, const struct stat& info_out) { MetaData metadata; size_t sum_file_size = 0; - metadata.number_of_files = htole64(count); + int is_exec = exec && *exec ? 1 : 0; + metadata.number_of_files = htole64(count + is_exec); off_t pointer = info_out.st_size; - /// Store information about each file and compress it - FileData* files_data = new FileData[count]; - char * names[count]; - for (int i = 0; i < count; ++i) - { - printf("Compressing: %s\n", filenames[i]); + uint64_t total_size = 0; + uint64_t total_compressed_size = 0; - int input_fd = open(filenames[i], O_RDONLY); + /// Store information about each file and compress it + FileData* files_data = new FileData[count + is_exec]; + const char * names[count + is_exec]; + for (int i = 0; i <= count; ++i) + { + const char* filename = nullptr; + if (i == count) + { + if (!is_exec) + continue; + filename = exec; + files_data[i].exec = true; + } + else + filename = filenames[i]; + + printf("Compressing: %s\n", filename); + + int input_fd = open(filename, O_RDONLY); if (input_fd == -1) { perror(nullptr); @@ -253,14 +279,17 @@ int compressFiles(char* filenames[], int count, int output_fd, int level, const /// Remember information about file name /// This should be made after the file is opened /// because filename should be extracted from path - names[i] = strrchr(filenames[i], '/'); + names[i] = strrchr(filename, '/'); if (names[i]) ++names[i]; else - names[i] = filenames[i]; + names[i] = filename; size_t nlen = strlen(names[i]) + 1; files_data[i].name_length = htole64(nlen); sum_file_size += nlen; + /// if no --exec is specified nor it's empty - file which is matching output name is executable + if (!is_exec && !exec && strcmp(names[i], out_name) == 0) + files_data[i].exec = true; /// read data about input file struct stat info_in; @@ -278,6 +307,7 @@ int compressFiles(char* filenames[], int count, int output_fd, int level, const } std::cout << "Size: " << info_in.st_size << std::endl; + total_size += info_in.st_size; /// Save umask files_data[i].umask = htole64(info_in.st_mode); @@ -287,15 +317,19 @@ int compressFiles(char* filenames[], int count, int output_fd, int level, const files_data[i].uncompressed_size = htole64(info_in.st_size); files_data[i].start = htole64(pointer); + uint64_t compressed_size = 0; + /// Compressed data will be added to the end of file /// It will allow to create self extracting executable from file - if (0 != compress(input_fd, output_fd, level, pointer, info_in)) + if (0 != compress(input_fd, output_fd, level, pointer, info_in, compressed_size)) { perror(nullptr); delete [] files_data; return 1; } + total_compressed_size += compressed_size; + /// This error is less important, than others. 
/// If file cannot be closed, in some cases it will lead to
 /// error in other function that will stop compression process
@@ -308,12 +342,16 @@ int compressFiles(char* filenames[], int count, int output_fd, int level, const
     /// save location of files information
     metadata.start_of_files_data = htole64(pointer);
 
-    if (0 != saveMetaData(names, count, output_fd, metadata, files_data, pointer, sum_file_size))
+    if (0 != saveMetaData(names, count + is_exec, output_fd, metadata, files_data, pointer, sum_file_size))
     {
         delete [] files_data;
         return 1;
     }
 
+    std::cout << "Compression rate: " << std::fixed << std::setprecision(2) <<
+        static_cast(total_compressed_size) / total_size * 100 << "%"
+        << std::endl;
+
     delete [] files_data;
     return 0;
 }
@@ -424,10 +462,14 @@ int copy_decompressor_file(const char *path, int output_fd)
 inline void usage(FILE * out, const char * name)
 {
     (void)fprintf(out,
-        "%s [--level=] [--decompressor=] [... ]\n"
+        "%s [--level=] [--decompressor=] [--exec=] [ [... ]]\n"
         "\t--level - compression level, max is %d, negative - prefer speed over compression\n"
         "\t          default is 5\n"
-        "\t--decompressor - path to decompressor\n",
+        "\t--decompressor - path to decompressor\n"
+        "\t--exec - path to an input file to execute after decompression, if omitted then\n"
+        "\t         an having the same name as becomes such executable.\n"
+        "\t         This executable upon decompression will substitute started compressed preserving compressed name.\n"
+        "\t         If no is specified - nothing will be run - only decompression will be performed.\n",
         name, ZSTD_maxCLevel());
 }
@@ -497,7 +539,12 @@ int main(int argc, char* argv[])
         ++start_of_files;
     }
 
-    if (argc < start_of_files + 1)
+    /// Specified executable
+    const char * exec = get_param(argc, argv, "exec");
+    if (exec != nullptr)
+        ++start_of_files;
+
+    if (argc < start_of_files + (exec == nullptr || *exec == 0 ? 1 : 0))
     {
         usage(stderr, argv[0]);
         return 1;
     }
@@ -516,6 +563,12 @@ int main(int argc, char* argv[])
         perror(nullptr);
         return 1;
     }
+
+    const char* out_name = strrchr(argv[start_of_files], '/');
+    if (out_name)
+        ++out_name;
+    else
+        out_name = argv[start_of_files];
     ++start_of_files;
 
     if (decompressor != nullptr)
@@ -536,7 +589,7 @@
     }
     std::cout << "Compression with level: " << level << std::endl;
 
-    if (0 != compressFiles(&argv[start_of_files], argc - start_of_files, output_fd, level, info_out))
+    if (0 != compressFiles(out_name, exec, &argv[start_of_files], argc - start_of_files, output_fd, level, info_out))
     {
         printf("Compression failed.\n");
         close(output_fd);
diff --git a/utils/self-extracting-executable/decompressor.cpp b/utils/self-extracting-executable/decompressor.cpp
index c0c97b1d711..8d8d137a2ac 100644
--- a/utils/self-extracting-executable/decompressor.cpp
+++ b/utils/self-extracting-executable/decompressor.cpp
@@ -101,8 +101,8 @@ int decompress(char * input, char * output, off_t start, off_t end, size_t max_n
     {
         /// Decompress data in child process.
         if (0 != doDecompress(input, output, in_pointer, out_pointer, size, decompressed_size, dctx))
-            exit(1);
-        exit(0);
+            _exit(1);
+        _exit(0);
     }
     else
     {
@@ -168,7 +168,7 @@
 
 /// Read data about files and decompress them.
@@ -168,7 +168,7 @@ int decompress(char * input, char * output, off_t start, off_t end, size_t max_n
 
 /// Read data about files and decompress them.
-int decompressFiles(int input_fd, char * path, char * name, bool & have_compressed_analoge, char * decompressed_suffix, uint64_t * decompressed_umask)
+int decompressFiles(int input_fd, char * path, char * name, bool & have_compressed_analoge, bool & has_exec, char * decompressed_suffix, uint64_t * decompressed_umask)
 {
     /// Read data about output file.
     /// Compressed data will replace data in file
@@ -226,8 +226,8 @@ int decompressFiles(int input_fd, char * path, char * name, bool & have_compress
         file_info = *reinterpret_cast<FileData *>(input + files_pointer);
         files_pointer += sizeof(FileData);
 
-        size_t file_name_len =
-            (strcmp(input + files_pointer, name) ? le64toh(file_info.name_length) : le64toh(file_info.name_length) + 13 + 7);
+        /// For the output filename matching the compressed one, allow additional 13 + 7 symbols for the ".decompressed.XXXXXX" suffix
+        size_t file_name_len = file_info.exec ? strlen(name) + 13 + 7 + 1 : le64toh(file_info.name_length);
 
         size_t file_path_len = path ? strlen(path) + 1 + file_name_len : file_name_len;
 
@@ -238,9 +238,22 @@
             strcat(file_name, path);
             strcat(file_name, "/");
         }
-        strcat(file_name, input + files_pointer);
+
+        bool same_name = false;
+        if (file_info.exec)
+        {
+            has_exec = true;
+            strcat(file_name, name);
+        }
+        else
+        {
+            if (strcmp(name, input + files_pointer) == 0)
+                same_name = true;
+            strcat(file_name, input + files_pointer);
+        }
+
         files_pointer += le64toh(file_info.name_length);
-        if (file_name_len != le64toh(file_info.name_length))
+        if (file_info.exec || same_name)
         {
             strcat(file_name, ".decompressed.XXXXXX");
             int fd = mkstemp(file_name);
@@ -377,11 +390,12 @@ int main(int/* argc*/, char* argv[])
     }
 
     bool have_compressed_analoge = false;
+    bool has_exec = false;
     char decompressed_suffix[7] = {0};
     uint64_t decompressed_umask = 0;
 
     /// Decompress all files
-    if (0 != decompressFiles(input_fd, path, name, have_compressed_analoge, decompressed_suffix, &decompressed_umask))
+    if (0 != decompressFiles(input_fd, path, name, have_compressed_analoge, has_exec, decompressed_suffix, &decompressed_umask))
     {
         printf("Error happened during decompression.\n");
         if (0 != close(input_fd))
@@ -427,10 +441,15 @@ int main(int/* argc*/, char* argv[])
             return 1;
         }
 
-        execv(self, argv);
+        if (has_exec)
+        {
+            execv(self, argv);
 
-        /// This part of code will be reached only if error happened
-        perror("execv");
-        return 1;
+            /// This part of code will be reached only if error happened
+            perror("execv");
+            return 1;
+        }
+
+        printf("No target executable - decompression only was performed.\n");
     }
 }
diff --git a/utils/self-extracting-executable/types.h b/utils/self-extracting-executable/types.h
index 7fc9c05a48a..95202810ba6 100644
--- a/utils/self-extracting-executable/types.h
+++ b/utils/self-extracting-executable/types.h
@@ -1,5 +1,6 @@
 #pragma once
 
+#include
 #include
 #include
 
@@ -43,5 +44,6 @@ struct FileData
     uint64_t name_length = 0;
     uint64_t uncompressed_size = 0;
     uint64_t umask = 0;
+    bool exec = false;
 };
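A note on the FileData layout above: every multi-byte field is converted with htole64() on write and le64toh() on read, so the self-extracting archive stays valid across hosts of different endianness; the new exec flag is a single byte and needs no conversion. A minimal sketch of that round-trip convention, assuming glibc's <endian.h> (which these tools already rely on) and a trimmed stand-in struct:

    #include <cstdint>
    #include <cstring>
    #include <endian.h>

    /// Trimmed stand-in for FileData, for illustration only.
    struct FileDataSketch
    {
        uint64_t uncompressed_size = 0;
        bool exec = false; /// one byte: no byte swapping required
    };

    int main()
    {
        FileDataSketch out;
        out.uncompressed_size = htole64(123456789); /// stored little-endian
        out.exec = true;

        char buffer[sizeof(FileDataSketch)];
        std::memcpy(buffer, &out, sizeof(out)); /// "write" into the archive

        FileDataSketch in;
        std::memcpy(&in, buffer, sizeof(in)); /// "read" it back, possibly on another host
        return le64toh(in.uncompressed_size) == 123456789 && in.exec ? 0 : 1;
    }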
diff --git a/utils/zookeeper-test/CMakeLists.txt b/utils/zookeeper-test/CMakeLists.txt
deleted file mode 100644
index 57d9106d8db..00000000000
--- a/utils/zookeeper-test/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-clickhouse_add_executable(zk-test main.cpp)
-target_link_libraries(zk-test PRIVATE clickhouse_common_zookeeper_no_log)
diff --git a/utils/zookeeper-test/main.cpp b/utils/zookeeper-test/main.cpp
deleted file mode 100644
index 54d8c69bb36..00000000000
--- a/utils/zookeeper-test/main.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-
-using namespace std;
-
-/// TODO: Remove ME
-
-void checkEq(zkutil::ZooKeeper & zk, const std::string & path, const std::string & expected)
-{
-    auto result = zk.get(path);
-    if (result != expected)
-        throw std::runtime_error(fmt::format("Data on path '{}' = '{}' doesn't match expected '{}'",
-            path, result, expected));
-}
-
-void checkExists(zkutil::ZooKeeper & zk, const std::string & path)
-{
-    if (!zk.exists(path))
-        throw std::runtime_error(fmt::format("Path '{}' doesn't exist", path));
-}
-
-void testCreateGetExistsNode(zkutil::ZooKeeper & zk)
-{
-    zk.create("/data", "test_string", zkutil::CreateMode::Persistent);
-    zk.create("/data/seq-", "another_string", zkutil::CreateMode::PersistentSequential);
-    checkEq(zk, "/data", "test_string");
-    checkExists(zk, "/data/seq-0000000000");
-    checkEq(zk, "/data/seq-0000000000", "another_string");
-}
-
-void testCreateSetNode(zkutil::ZooKeeper & zk)
-{
-    zk.create("/data/set", "sssss", zkutil::CreateMode::Persistent);
-    checkEq(zk, "/data/set", "sssss");
-    zk.set("/data/set", "qqqqq");
-    checkEq(zk, "/data/set", "qqqqq");
-}
-
-void testCreateList(zkutil::ZooKeeper & zk)
-{
-    zk.create("/data/lst", "", zkutil::CreateMode::Persistent);
-    zk.create("/data/lst/d1", "", zkutil::CreateMode::Persistent);
-    zk.create("/data/lst/d2", "", zkutil::CreateMode::Persistent);
-    zk.create("/data/lst/d3", "", zkutil::CreateMode::Persistent);
-    auto children = zk.getChildren("/data/lst");
-    if (children.size() != 3)
-        throw std::runtime_error("Number of children of /data/lst does not equal three");
-    for (size_t i = 0; i < children.size(); ++i)
-    {
-        std::cerr << "children:" << children[i] << std::endl;
-        std::cerr << "children size:" << children[i].size() << std::endl;
-        if (children[i] != "d" + std::to_string(i + 1))
-            throw std::runtime_error(fmt::format("Incorrect children #{} got {}, expected {}", i, children[i], "d" + std::to_string(i + 1)));
-    }
-}
-
-void testCreateSetVersionRequest(zkutil::ZooKeeper & zk)
-{
-    zk.create("/data/check_data", "d", zkutil::CreateMode::Persistent);
-    Coordination::Stat stat{};
-    try
-    {
-        zk.set("/data/check_data", "e", stat.version + 2);
-        std::terminate();
-    }
-    catch (...)
-    {
-        std::cerr << "Got exception on incorrect version (it's ok)\n";
-    }
-
-    checkEq(zk, "/data/check_data", "d");
-    zk.set("/data/check_data", "e", stat.version);
-
-    checkEq(zk, "/data/check_data", "e");
-}
-
-void testCreateSetWatchEvent(zkutil::ZooKeeper & zk)
-{
-
-    std::shared_ptr<Poco::Event> event = std::make_shared<Poco::Event>();
-    zk.create("/data/nodeforwatch", "", zkutil::CreateMode::Persistent);
-    Coordination::Stat stat;
-    zk.get("/data/nodeforwatch", &stat, event);
-
-    if (event->tryWait(300))
-        throw std::runtime_error(fmt::format("Event for path {} was set without any actions", "/data/nodeforwatch"));
-
-    zk.set("/data/nodeforwatch", "x");
-    if (!event->tryWait(300))
-        throw std::runtime_error(fmt::format("Event for path {} was not set after set", "/data/nodeforwatch"));
-    else
-        std::cerr << "Event was set well\n";
-}
-
-void testCreateListWatchEvent(zkutil::ZooKeeper & zk)
-{
-    std::shared_ptr<Poco::Event> event = std::make_shared<Poco::Event>();
-    std::string path = "/data/pathforwatch";
-    zk.create(path, "", zkutil::CreateMode::Persistent);
-    zk.create(path + "/n1", "", zkutil::CreateMode::Persistent);
-    zk.create(path + "/n2", "", zkutil::CreateMode::Persistent);
-    zk.getChildren(path, nullptr, event);
-
-    if (event->tryWait(300))
-        throw std::runtime_error(fmt::format("ListEvent for path {} was set without any actions", path));
-
-    zk.create(path + "/n3", "", zkutil::CreateMode::Persistent);
-    if (!event->tryWait(300))
-        throw std::runtime_error(fmt::format("ListEvent for path {} was not set after create", path));
-    else
-        std::cerr << "ListEvent was set well\n";
-}
-
-void testMultiRequest(zkutil::ZooKeeper & zk)
-{
-    std::cerr << "Testing multi request\n";
-    Coordination::Requests requests;
-    requests.push_back(zkutil::makeCreateRequest("/data/multirequest", "aaa", zkutil::CreateMode::Persistent));
-    requests.push_back(zkutil::makeSetRequest("/data/multirequest", "bbb", -1));
-    zk.multi(requests);
-    std::cerr << "Multi executed\n";
-
-    try
-    {
-        requests.clear();
-        std::cerr << "Testing bad multi\n";
-        requests.push_back(zkutil::makeCreateRequest("/data/multirequest", "qweqwe", zkutil::CreateMode::Persistent));
-        requests.push_back(zkutil::makeSetRequest("/data/multirequest", "bbb", -1));
-        requests.push_back(zkutil::makeSetRequest("/data/multirequest", "ccc", -1));
-        zk.multi(requests);
-        std::cerr << "Bad multi executed\n";
-        std::terminate();
-    }
-    catch (...)
-    {
-        std::cerr << "Got exception on multi request (it's ok)\n";
-    }
-
-    checkEq(zk, "/data/multirequest", "bbb");
-    std::cerr << "Multi request finished\n";
-}
-
-std::mutex elements_mutex;
-std::vector<int> current_elements;
-std::atomic watches_triggered = 0;
-
-void triggerWatch(const Coordination::WatchResponse &)
-{
-    watches_triggered++;
-}
-
-template <typename Iter, typename RandomGenerator>
-Iter select_randomly(Iter start, Iter end, RandomGenerator& g)
-{
-    std::uniform_int_distribution<> dis(0, std::distance(start, end) - 1);
-    std::advance(start, dis(g));
-    return start;
-}
-
-template <typename Iter>
-Iter select_randomly(Iter start, Iter end)
-{
-    static std::random_device rd;
-    static std::mt19937 gen(rd());
-    return select_randomly(start, end, gen);
-}
-
-std::atomic element_counter = 0;
-std::atomic failed_setup_counter = 0;
-
-void createPathAndSetWatch(zkutil::ZooKeeper & zk, const String & path_prefix, size_t total)
-{
-    for (size_t i = 0; i < total; ++i)
-    {
-        int element = element_counter++;
-        zk.createIfNotExists(path_prefix + "/" + std::to_string(element), "");
-
-        std::string result;
-        if (!zk.tryGetWatch(path_prefix + "/" + std::to_string(element), result, nullptr, triggerWatch))
-            failed_setup_counter++;
-
-        {
-            std::lock_guard lock(elements_mutex);
-            current_elements.push_back(element);
-        }
-
-        std::this_thread::sleep_for(std::chrono::milliseconds(200));
-
-        {
-            std::lock_guard lock(elements_mutex);
-            if (current_elements.empty())
-                continue;
-            element = *select_randomly(current_elements.begin(), current_elements.end());
-            current_elements.erase(std::remove(current_elements.begin(), current_elements.end(), element), current_elements.end());
-        }
-        zk.tryRemove(path_prefix + "/" + std::to_string(element));
-    }
-
-}
-
-std::string random_string(size_t length)
-{
-    auto randchar = []() -> char
-    {
-        const char charset[] =
-            "0123456789"
-            "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-            "abcdefghijklmnopqrstuvwxyz";
-        const size_t max_index = (sizeof(charset) - 1);
-        return charset[rand() % max_index]; /// NOLINT
-    };
-    std::string str(length, 0);
-    std::generate_n(str.begin(), length, randchar);
-    return str;
-}
-
-std::string currentDateTime()
-{
-    time_t now = time(nullptr);
-    tm tstruct;
-    char buf[80];
-    tstruct = *localtime(&now);
-    // Visit http://en.cppreference.com/w/cpp/chrono/c/strftime
-    // for more information about date/time format
-    size_t size = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tstruct);
-
-    return std::string(buf, size);
-}
-
-
-void createOnPrefix(const std::string & zkhost, const String & path_prefix, size_t datasize, size_t total)
-{
-    zkutil::ZooKeeper zk(zkhost);
-    std::vector<std::future<Coordination::CreateResponse>> holder_futures;
-    using namespace std::chrono;
-    try
-    {
-        for (size_t i = 0; i < total; ++i)
-        {
-            std::cerr << currentDateTime() << "] Request:" << i << std::endl;
-            std::string path = path_prefix + "/element" + std::to_string(i);
-            holder_futures.push_back(zk.asyncCreate(path, random_string(datasize), zkutil::CreateMode::Persistent));
-        }
-
-        for (auto & future : holder_futures)
-            future.get();
-    }
-    catch (...)
-    {
-        ::exit(-1);
-    }
-}
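For context on the removed utility: createOnPrefix() above used the common fan-out idiom of firing many asynchronous requests, collecting the futures, and draining them with get(), which rethrows the exception stored by any failed request so that one try/catch covers the whole batch. A standalone illustration of the idiom, with no ZooKeeper dependency:

    #include <future>
    #include <iostream>
    #include <stdexcept>
    #include <vector>

    int main()
    {
        std::vector<std::future<int>> futures;
        for (int i = 0; i < 8; ++i)
            futures.push_back(std::async(std::launch::async, [i]
            {
                if (i == 5)
                    throw std::runtime_error("request failed"); /// simulate one bad request
                return i;
            }));

        try
        {
            for (auto & f : futures)
                f.get(); /// rethrows the stored exception, if any
        }
        catch (const std::exception & e)
        {
            std::cerr << "caught: " << e.what() << '\n';
            return 1;
        }
        return 0;
    }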
-
-
-void createConcurrent(zkutil::ZooKeeper & testzk, const std::string & zkhost, size_t threads, size_t requests, size_t blobsize)
-{
-    std::vector<std::future<void>> asyncs;
-    for (size_t i = 0; i < threads; ++i)
-    {
-        std::string path_prefix = "/data/create_test" + std::to_string(i);
-        testzk.createIfNotExists(path_prefix, "");
-        auto callback = [&zkhost, path_prefix, requests, blobsize] ()
-        {
-            createOnPrefix(zkhost, path_prefix, blobsize, requests);
-        };
-        asyncs.push_back(std::async(std::launch::async, callback));
-    }
-
-    for (auto & async : asyncs)
-    {
-        async.wait();
-    }
-}
-
-void tryConcurrentWatches(zkutil::ZooKeeper & zk)
-{
-    std::string path_prefix = "/concurrent_watches";
-    std::vector<std::future<void>> asyncs;
-    zk.createIfNotExists(path_prefix, "");
-    for (size_t i = 0; i < 100; ++i)
-    {
-        auto callback = [&zk, path_prefix] ()
-        {
-            createPathAndSetWatch(zk, path_prefix, 100);
-        };
-        asyncs.push_back(std::async(std::launch::async, callback));
-    }
-
-    for (auto & async : asyncs)
-    {
-        async.wait();
-    }
-
-    size_t counter = 0;
-    while (watches_triggered != 100 * 100)
-    {
-        std::this_thread::sleep_for(std::chrono::milliseconds(100));
-        if (counter++ > 20)
-            break;
-    }
-
-    std::cerr << "Failed setup counter:" << failed_setup_counter << std::endl;
-    std::cerr << "Current elements size:" << current_elements.size() << std::endl;
-    std::cerr << "WatchesTriggered:" << watches_triggered << std::endl;
-}
-
-
-int main(int argc, char *argv[])
-{
-    if (argc != 2)
-    {
-        std::cerr << "usage: " << argv[0] << " hosts" << std::endl;
-        return 2;
-    }
-    Poco::AutoPtr<Poco::ConsoleChannel> channel = new Poco::ConsoleChannel(std::cerr);
-    Poco::Logger::root().setChannel(channel);
-    Poco::Logger::root().setLevel("trace");
-
-    zkutil::ZooKeeper zk{zkutil::ZooKeeperArgs(argv[1])};
-
-    try
-    {
-        std::cerr << "Removing\n";
-        zk.tryRemoveRecursive("/data");
-        std::cerr << "Creating\n";
-        zk.createIfNotExists("/data", "");
-        std::cerr << "Created\n";
-
-        Stopwatch watch;
-        createConcurrent(zk, argv[1], 1, 1005000, 10);
-        std::cerr << "Finished in: " << watch.elapsedMilliseconds() << "ms" << std::endl;
-
-        //testCreateGetExistsNode(zk);
-        //testCreateSetNode(zk);
-        //testCreateList(zk);
-        //testCreateSetVersionRequest(zk);
-        //testMultiRequest(zk);
-        //testCreateSetWatchEvent(zk);
-        //testCreateListWatchEvent(zk);
-        //tryConcurrentWatches(zk);
-    }
-    catch (...)
-    {
-        zk.tryRemoveRecursive("/data");
-        throw;
-    }
-    return 0;
-}
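Returning to the self-extracting changes this diff centers on: the decompressor now finishes in one of two ways - it either replaces itself with the freshly decompressed executable via execv(), preserving the original argv, or reports that only decompression was performed. A simplified sketch of that final step, with the hypothetical helper name finish() (this is not the decompressor's own code):

    #include <cstdio>
    #include <unistd.h>

    static int finish(bool has_exec, char * self, char * argv[])
    {
        if (has_exec)
        {
            execv(self, argv); /// on success this never returns
            perror("execv");   /// reached only if execv() failed
            return 1;
        }
        printf("No target executable - decompression only was performed.\n");
        return 0;
    }

    int main(int, char * argv[])
    {
        return finish(false, argv[0], argv); /// prints the decompression-only message
    }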