Merge branch 'master' into s390x_ip_coding
This commit is contained in commit 8dc99e8d64
.github/workflows/jepsen.yml (vendored): 33 changes
@@ -32,10 +32,41 @@ jobs:
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 keeper_jepsen_check.py
          python3 jepsen_check.py keeper
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  # ServerJepsenRelease:
  #   runs-on: [self-hosted, style-checker]
  #   if: ${{ always() }}
  #   needs: [KeeperJepsenRelease]
  #   steps:
  #   - name: Set envs
  #     run: |
  #       cat >> "$GITHUB_ENV" << 'EOF'
  #       TEMP_PATH=${{runner.temp}}/server_jepsen
  #       REPO_COPY=${{runner.temp}}/server_jepsen/ClickHouse
  #       EOF
  #   - name: Clear repository
  #     run: |
  #       sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
  #   - name: Check out repository code
  #     uses: actions/checkout@v2
  #     with:
  #       fetch-depth: 0
  #   - name: Jepsen Test
  #     run: |
  #       sudo rm -fr "$TEMP_PATH"
  #       mkdir -p "$TEMP_PATH"
  #       cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
  #       cd "$REPO_COPY/tests/ci"
  #       python3 jepsen_check.py server
  #   - name: Cleanup
  #     if: always()
  #     run: |
  #       docker ps --quiet | xargs --no-run-if-empty docker kill ||:
  #       docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
  #       sudo rm -fr "$TEMP_PATH"
CHANGELOG.md: 110 changes
@@ -1,4 +1,5 @@
### Table of Contents
**[ClickHouse release v22.11, 2022-11-17](#2211)**<br/>
**[ClickHouse release v22.10, 2022-10-25](#2210)**<br/>
**[ClickHouse release v22.9, 2022-09-22](#229)**<br/>
**[ClickHouse release v22.8-lts, 2022-08-18](#228)**<br/>
@@ -11,6 +12,109 @@
**[ClickHouse release v22.1, 2022-01-18](#221)**<br/>
**[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**<br/>

### <a id="2211"></a> ClickHouse release 22.11, 2022-11-17

#### Backward Incompatible Change
* The `JSONExtract` family of functions will now attempt to coerce the value to the requested type. [#41502](https://github.com/ClickHouse/ClickHouse/pull/41502) ([Márcio Martins](https://github.com/marcioapm)).

#### New Feature
* Add support for retries during INSERT into ReplicatedMergeTree when the session with ClickHouse Keeper is lost. Apart from fault tolerance, this aims to provide a better user experience: avoid returning an error to the user during an insert if Keeper is restarted (for example, due to an upgrade). This is controlled by the `insert_keeper_max_retries` setting, which is disabled by default. [#42607](https://github.com/ClickHouse/ClickHouse/pull/42607) ([Igor Nikonov](https://github.com/devcrafter)).
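An illustrative sketch of enabling the retries for a single insert; the table and values are hypothetical, and only the setting name comes from the entry above:

```sql
-- Retry the lost Keeper session up to 20 times before failing the INSERT.
INSERT INTO replicated_table SETTINGS insert_keeper_max_retries = 20 VALUES (1, 'a');
```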
* Add `Hudi` and `DeltaLake` table engines, read-only, only for tables on S3. [#41054](https://github.com/ClickHouse/ClickHouse/pull/41054) ([Daniil Rubin](https://github.com/rubin-do), [Kseniia Sumarokova](https://github.com/kssenii)).
* Add table functions `hudi` and `deltaLake`. [#43080](https://github.com/ClickHouse/ClickHouse/pull/43080) ([flynn](https://github.com/ucasfl)).
* Support for composite time intervals. 1. Add, subtract, and negate operations are now available on intervals; if the interval types differ, they are transformed into a tuple of those types. 2. A tuple of intervals can be added to or subtracted from a Date/DateTime field. 3. Added parsing of intervals with different types, for example: `INTERVAL '1 HOUR 1 MINUTE 1 SECOND'`. [#42195](https://github.com/ClickHouse/ClickHouse/pull/42195) ([Nikolay Degterinsky](https://github.com/evillique)).
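A sketch of what the entry above describes; the exact result shapes are assumptions based on the description:

```sql
-- Intervals of different types are parsed into a tuple of intervals.
SELECT INTERVAL '1 HOUR 1 MINUTE 1 SECOND';
-- A tuple of mixed-type intervals can be added to a DateTime value.
SELECT toDateTime('2022-11-17 00:00:00') + (INTERVAL 1 MONTH + INTERVAL 1 DAY);
```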
* Added `**` glob support for recursive directory traversal of the filesystem and S3. Resolves [#36316](https://github.com/ClickHouse/ClickHouse/issues/36316). [#42376](https://github.com/ClickHouse/ClickHouse/pull/42376) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
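A minimal sketch of the recursive glob with the `file` table function; the path is hypothetical and the query relies on schema inference:

```sql
-- `**` matches any number of nested directories.
SELECT count() FROM file('data/**/*.csv', 'CSVWithNames');
```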
* Introduce the `s3_plain` disk type for write-once-read-many operations. Implement `ATTACH` of a `MergeTree` table for the `s3_plain` disk. [#42628](https://github.com/ClickHouse/ClickHouse/pull/42628) ([Azat Khuzhin](https://github.com/azat)).
* Added applied row-level policies to `system.query_log`. [#39819](https://github.com/ClickHouse/ClickHouse/pull/39819) ([Vladimir Chebotaryov](https://github.com/quickhouse)).
* Add the four-letter command `csnp` for manually creating snapshots in ClickHouse Keeper. Additionally, `lgif` was added to get Raft information for a specific node (e.g. the index of the last created snapshot, the last committed log index). [#41766](https://github.com/ClickHouse/ClickHouse/pull/41766) ([JackyWoo](https://github.com/JackyWoo)).
* Add function `ascii`, as in Apache Spark: https://spark.apache.org/docs/latest/api/sql/#ascii. [#42670](https://github.com/ClickHouse/ClickHouse/pull/42670) ([李扬](https://github.com/taiyang-li)).
* Add function `positive_modulo` (`pmod`), which returns a non-negative result of the modulo operation. [#42755](https://github.com/ClickHouse/ClickHouse/pull/42755) ([李扬](https://github.com/taiyang-li)).
* Add function `formatReadableDecimalSize`. [#42774](https://github.com/ClickHouse/ClickHouse/pull/42774) ([Alejandro](https://github.com/alexon1234)).
* Add function `randCanonical`, which is similar to the `rand` function in Apache Spark or Impala. The function generates pseudo-random, independent and identically distributed values, uniformly distributed in [0, 1). [#43124](https://github.com/ClickHouse/ClickHouse/pull/43124) ([李扬](https://github.com/taiyang-li)).
* Add function `displayName`. Closes [#36770](https://github.com/ClickHouse/ClickHouse/issues/36770). [#37681](https://github.com/ClickHouse/ClickHouse/pull/37681) ([hongbin](https://github.com/xlwh)).
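A sketch of the new functions; the commented results are assumptions based on the entries above:

```sql
SELECT ascii('A');                          -- 65
SELECT positive_modulo(-3, 5);              -- 2 (while plain -3 % 5 is -3)
SELECT formatReadableDecimalSize(1000000);  -- '1.00 MB' (decimal, not binary, units)
SELECT randCanonical();                     -- a uniform pseudo-random value in [0, 1)
```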
* Add `min_age_to_force_merge_on_partition_only` setting to optimize old parts for the entire partition only. [#42659](https://github.com/ClickHouse/ClickHouse/pull/42659) ([Antonio Andelic](https://github.com/antonio2368)).
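A sketch of how the new flag might be applied; it assumes the pre-existing MergeTree setting `min_age_to_force_merge_seconds`, and the table definition is hypothetical:

```sql
CREATE TABLE t (d Date, x UInt64) ENGINE = MergeTree
PARTITION BY d ORDER BY x
SETTINGS min_age_to_force_merge_seconds = 3600,
         min_age_to_force_merge_on_partition_only = true;
```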
* Add a generic implementation for arbitrary structured named collections, an access type, and `system.named_collections`. [#43147](https://github.com/ClickHouse/ClickHouse/pull/43147) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Performance Improvement
* Parallelized merging of `uniqExact` states for aggregation without key, i.e. queries like `SELECT uniqExact(number) FROM table`. The improvement becomes noticeable when the number of unique keys approaches 10^6. The performance of `uniq` is also slightly optimized. [#43072](https://github.com/ClickHouse/ClickHouse/pull/43072) ([Nikita Taranov](https://github.com/nickitat)).
* The `match` function can use the index if it is a condition on a string prefix. This closes [#37333](https://github.com/ClickHouse/ClickHouse/issues/37333). [#42458](https://github.com/ClickHouse/ClickHouse/pull/42458) ([clarkcaoliu](https://github.com/Clark0)).
* Speed up AND and OR operators when they are sequenced. [#42214](https://github.com/ClickHouse/ClickHouse/pull/42214) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Support parallel parsing for the `LineAsString` input format. This improves performance just slightly. This closes [#42502](https://github.com/ClickHouse/ClickHouse/issues/42502). [#42780](https://github.com/ClickHouse/ClickHouse/pull/42780) ([Kruglov Pavel](https://github.com/Avogar)).
* ClickHouse Keeper performance improvement: improve commit performance for cases when many different nodes have uncommitted states. This should help with cases when a follower node can't sync fast enough. [#42926](https://github.com/ClickHouse/ClickHouse/pull/42926) ([Antonio Andelic](https://github.com/antonio2368)).
* A condition like `NOT LIKE 'prefix%'` can use the primary index. [#42209](https://github.com/ClickHouse/ClickHouse/pull/42209) ([Duc Canh Le](https://github.com/canhld94)).

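A sketch of the `NOT LIKE` case from the last entry above; it assumes a hypothetical table whose primary key starts with the String column `s`:

```sql
-- Previously a full scan; now the primary index can exclude key ranges
-- consisting entirely of values with the given prefix.
SELECT count() FROM t WHERE s NOT LIKE 'abc%';
```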
#### Experimental Feature
* Support type `Object` inside other types, e.g. `Array(JSON)`. [#36969](https://github.com/ClickHouse/ClickHouse/pull/36969) ([Anton Popov](https://github.com/CurtizJ)).
* Ignore the MySQL binlog SAVEPOINT event for MaterializedMySQL. [#42931](https://github.com/ClickHouse/ClickHouse/pull/42931) ([zzsmdfj](https://github.com/zzsmdfj)). Handle (ignore) SAVEPOINT queries in MaterializedMySQL. [#43086](https://github.com/ClickHouse/ClickHouse/pull/43086) ([Stig Bakken](https://github.com/stigsb)).

#### Improvement
* Trivial queries with a small LIMIT will properly determine the number of estimated rows to read, so that the threshold will be checked properly. Closes [#7071](https://github.com/ClickHouse/ClickHouse/issues/7071). [#42580](https://github.com/ClickHouse/ClickHouse/pull/42580) ([Han Fei](https://github.com/hanfei1991)).
* Add support for interactive parameters in INSERT VALUES queries. [#43077](https://github.com/ClickHouse/ClickHouse/pull/43077) ([Nikolay Degterinsky](https://github.com/evillique)).
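A sketch of parameters in INSERT VALUES; the table and parameter names are hypothetical, and `{name:Type}` is the usual query-parameter placeholder syntax:

```sql
-- Values are supplied by the client, e.g.:
--   clickhouse-client --param_id=42 --param_name='x' --query "..."
INSERT INTO t VALUES ({id:UInt32}, {name:String});
```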
* Added a new field `allow_readonly` in `system.table_functions` to allow using table functions in readonly mode. Resolves [#42414](https://github.com/ClickHouse/ClickHouse/issues/42414). Implementation: added a new field `allow_readonly` to the `system.table_functions` table and used it to allow table functions in readonly mode. Testing: added the test tests/queries/0_stateless/02473_functions_in_readonly_mode.sh. Documentation: updated the English documentation for table functions. [#42708](https://github.com/ClickHouse/ClickHouse/pull/42708) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* The `system.asynchronous_metrics` table gets embedded documentation. This documentation is also exported to Prometheus. Fixed an error with the metrics about `cache` disks - they were calculated only for one arbitrary cache disk instead of all of them. This closes [#7644](https://github.com/ClickHouse/ClickHouse/issues/7644). [#43194](https://github.com/ClickHouse/ClickHouse/pull/43194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The throttling algorithm was changed to a token bucket. [#42665](https://github.com/ClickHouse/ClickHouse/pull/42665) ([Sergei Trifonov](https://github.com/serxa)).
* Mask passwords and secret keys both in `system.query_log` and `/var/log/clickhouse-server/*.log`, and also in error messages. [#42484](https://github.com/ClickHouse/ClickHouse/pull/42484) ([Vitaly Baranov](https://github.com/vitlibar)).
* Remove covered parts for a fetched part, to avoid a possible growth of the replication delay. [#39737](https://github.com/ClickHouse/ClickHouse/pull/39737) ([Azat Khuzhin](https://github.com/azat)).
* If `/dev/tty` is available, the progress in clickhouse-client and clickhouse-local will be rendered directly to the terminal, without writing to STDERR. This allows getting progress even if STDERR is redirected to a file, and the file will not be polluted by terminal escape sequences. The progress can be disabled by `--progress false`. This closes [#32238](https://github.com/ClickHouse/ClickHouse/issues/32238). [#42003](https://github.com/ClickHouse/ClickHouse/pull/42003) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add support for `FixedString` input to base64 coding functions. [#42285](https://github.com/ClickHouse/ClickHouse/pull/42285) ([ltrk2](https://github.com/ltrk2)).
* Add columns `bytes_on_disk` and `path` to `system.detached_parts`. Closes [#42264](https://github.com/ClickHouse/ClickHouse/issues/42264). [#42303](https://github.com/ClickHouse/ClickHouse/pull/42303) ([chen](https://github.com/xiedeyantu)).
* Improve using the structure from the insertion table in table functions: the setting `use_structure_from_insertion_table_in_table_functions` has a new possible value, `2`, which means that ClickHouse will automatically determine whether the structure from the insertion table can be used. Closes [#40028](https://github.com/ClickHouse/ClickHouse/issues/40028). [#42320](https://github.com/ClickHouse/ClickHouse/pull/42320) ([Kruglov Pavel](https://github.com/Avogar)).
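A sketch of the automatic mode; the destination table and file path are hypothetical:

```sql
-- With value 2 (auto), ClickHouse decides whether the structure of `dest`
-- can replace schema inference for the `file` table function.
INSERT INTO dest
SELECT * FROM file('data.csv')
SETTINGS use_structure_from_insertion_table_in_table_functions = 2;
```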
* Fix the missing progress indication for INSERT FROM INFILE. Closes [#42548](https://github.com/ClickHouse/ClickHouse/issues/42548). [#42634](https://github.com/ClickHouse/ClickHouse/pull/42634) ([chen](https://github.com/xiedeyantu)).
* Refactor the function `tokens` to enable a maximum number of tokens returned for related functions (disabled by default). [#42673](https://github.com/ClickHouse/ClickHouse/pull/42673) ([李扬](https://github.com/taiyang-li)).
* Allow using `Date32` arguments for the `formatDateTime` and `FROM_UNIXTIME` functions. [#42737](https://github.com/ClickHouse/ClickHouse/pull/42737) ([Roman Vasin](https://github.com/rvasin)).
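A sketch with `formatDateTime`; `Date32` matters here because it covers dates before 1970:

```sql
SELECT formatDateTime(toDate32('1900-01-01'), '%Y-%m-%d');  -- '1900-01-01'
```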
* Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add the `FailedAsyncInsertQuery` event metric for async inserts. [#42814](https://github.com/ClickHouse/ClickHouse/pull/42814) ([Krzysztof Góralski](https://github.com/kgoralski)).
* Implement the `read-in-order` optimization on top of the query plan. It is enabled by default. Set `query_plan_read_in_order = 0` to use the previous AST-based version. [#42829](https://github.com/ClickHouse/ClickHouse/pull/42829) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Increase the size of upload parts exponentially for backups to S3, to avoid errors about the limit of 10,000 parts for a multipart upload to S3. [#42833](https://github.com/ClickHouse/ClickHouse/pull/42833) ([Vitaly Baranov](https://github.com/vitlibar)).
* When the merge task is continuously busy and disk space is insufficient, completely expired parts could not be selected and dropped, resulting in insufficient disk space. Now, when an entire part expires, no additional disk space needs to be guaranteed, which ensures the normal execution of TTL. [#42869](https://github.com/ClickHouse/ClickHouse/pull/42869) ([zhongyuankai](https://github.com/zhongyuankai)).
* Add the `oss` function and the `OSS` table engine (this is convenient for users); OSS is fully compatible with S3. [#43155](https://github.com/ClickHouse/ClickHouse/pull/43155) ([zzsmdfj](https://github.com/zzsmdfj)).
* Improve error reporting in the collection of OS-related info for the `system.asynchronous_metrics` table. [#43192](https://github.com/ClickHouse/ClickHouse/pull/43192) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Modify the `INFORMATION_SCHEMA` tables in a way so that ClickHouse can connect to itself using the MySQL compatibility protocol. Add columns instead of aliases (related to [#9769](https://github.com/ClickHouse/ClickHouse/issues/9769)). This will improve compatibility with various MySQL clients. [#43198](https://github.com/ClickHouse/ClickHouse/pull/43198) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Add some functions for compatibility with PowerBI when it connects using the MySQL protocol. [#42612](https://github.com/ClickHouse/ClickHouse/pull/42612) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Better usability for the Dashboard on changes. [#42872](https://github.com/ClickHouse/ClickHouse/pull/42872) ([Vladimir C](https://github.com/vdimir)).

#### Build/Testing/Packaging Improvement
* Run SQLancer for each pull request and commit to master. [SQLancer](https://github.com/sqlancer/sqlancer) is an open-source fuzzer that focuses on automatic detection of logical bugs. [#42397](https://github.com/ClickHouse/ClickHouse/pull/42397) ([Ilya Yatsishin](https://github.com/qoega)).
* Update to the latest zlib-ng. [#42463](https://github.com/ClickHouse/ClickHouse/pull/42463) ([Boris Kuschel](https://github.com/bkuschel)).
* Add support for testing the ClickHouse server with Jepsen. By the way, we already have support for testing ClickHouse Keeper with Jepsen. This pull request extends it to replicated tables. [#42619](https://github.com/ClickHouse/ClickHouse/pull/42619) ([Antonio Andelic](https://github.com/antonio2368)).
* Use https://github.com/matus-chochlik/ctcache for caching clang-tidy results. [#42913](https://github.com/ClickHouse/ClickHouse/pull/42913) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Before the fix, a user-defined config was preserved by RPM in `$file.rpmsave`. The PR fixes this so that packages no longer replace the user's files. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove some libraries from the Ubuntu Docker image. [#42622](https://github.com/ClickHouse/ClickHouse/pull/42622) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Updated the normalizer to clone the alias AST. Resolves [#42452](https://github.com/ClickHouse/ClickHouse/issues/42452). Implementation: updated QueryNormalizer to clone the alias AST when it is replaced; previously, assigning the same AST led to an exception in LogicalExpressionsOptimizer, as it would be the same parent being inserted again. This bug is not seen with the new analyzer (`allow_experimental_analyzer`), so no changes were made for it. A test was added for this. [#42827](https://github.com/ClickHouse/ClickHouse/pull/42827) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix a race during backup of tables in `Lazy` databases. [#43104](https://github.com/ClickHouse/ClickHouse/pull/43104) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix for `skip_unavailable_shards`: it did not work with the `s3Cluster` table function. [#43131](https://github.com/ClickHouse/ClickHouse/pull/43131) ([chen](https://github.com/xiedeyantu)).
* Fix schema inference in `s3Cluster` and improvement in `hdfsCluster`. [#41979](https://github.com/ClickHouse/ClickHouse/pull/41979) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix retries while reading from URL table engines / table functions (retriable errors could be retried more times than needed, and non-retriable errors resulted in a failed assertion in the code). [#42224](https://github.com/ClickHouse/ClickHouse/pull/42224) ([Kseniia Sumarokova](https://github.com/kssenii)).
* A segmentation fault related to DNS & c-ares has been reported and fixed. [#42234](https://github.com/ClickHouse/ClickHouse/pull/42234) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix the `LOGICAL_ERROR` `Arguments of 'plus' have incorrect data types`, which may happen in PK analysis (monotonicity check). Fix invalid PK analysis for monotonic binary functions with the first argument constant. [#42410](https://github.com/ClickHouse/ClickHouse/pull/42410) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect key analysis when key types cannot be inside Nullable. This fixes [#42456](https://github.com/ClickHouse/ClickHouse/issues/42456). [#42469](https://github.com/ClickHouse/ClickHouse/pull/42469) ([Amos Bird](https://github.com/amosbird)).
* Fix a typo in a setting name that led to bad usage of the schema inference cache while using the setting `input_format_csv_use_best_effort_in_schema_inference`. Closes [#41735](https://github.com/ClickHouse/ClickHouse/issues/41735). [#42536](https://github.com/ClickHouse/ClickHouse/pull/42536) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix creating a Set with a wrong header when the data type is LowCardinality. Closes [#42460](https://github.com/ClickHouse/ClickHouse/issues/42460). [#42579](https://github.com/ClickHouse/ClickHouse/pull/42579) ([flynn](https://github.com/ucasfl)).
* `(U)Int128` and `(U)Int256` values are now correctly checked in `PREWHERE`. [#42605](https://github.com/ClickHouse/ClickHouse/pull/42605) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix a bug in the functions parser that could have led to a segmentation fault. [#42724](https://github.com/ClickHouse/ClickHouse/pull/42724) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix the locking in `TRUNCATE TABLE`. [#42728](https://github.com/ClickHouse/ClickHouse/pull/42728) ([flynn](https://github.com/ucasfl)).
* Fix a possible crash in `web` disks when a file does not exist (or on `OPTIMIZE TABLE FINAL`, which could also eventually hit the same error). [#42767](https://github.com/ClickHouse/ClickHouse/pull/42767) ([Azat Khuzhin](https://github.com/azat)).
* Fix the `auth_type` mapping in `system.session_log` by including `SSL_CERTIFICATE` in the enum values. [#42782](https://github.com/ClickHouse/ClickHouse/pull/42782) ([Miel Donkers](https://github.com/mdonkers)).
* Fix a stack-use-after-return under the ASAN build in the CREATE USER query parser. [#42804](https://github.com/ClickHouse/ClickHouse/pull/42804) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix `lowerUTF8`/`upperUTF8` in the case of a symbol crossing a 16-byte boundary (a very frequent case if you have strings longer than 16 bytes). [#42812](https://github.com/ClickHouse/ClickHouse/pull/42812) ([Azat Khuzhin](https://github.com/azat)).
* An additional bound check was added to the LZ4 decompression routine to fix misbehaviour in the case of malformed input. [#42868](https://github.com/ClickHouse/ClickHouse/pull/42868) ([Nikita Taranov](https://github.com/nickitat)).
* Fix a rare possible hang on query cancellation. [#42874](https://github.com/ClickHouse/ClickHouse/pull/42874) ([Azat Khuzhin](https://github.com/azat)).
* Fix incorrect behavior with multiple disjuncts in hash join, close [#42832](https://github.com/ClickHouse/ClickHouse/issues/42832). [#42876](https://github.com/ClickHouse/ClickHouse/pull/42876) ([Vladimir C](https://github.com/vdimir)).
* Fix a null pointer that could be produced when selecting `if` with an alias from a three-table join. [#42883](https://github.com/ClickHouse/ClickHouse/pull/42883) ([zzsmdfj](https://github.com/zzsmdfj)).
* Fix a memory sanitizer report in Cluster Discovery, close [#42763](https://github.com/ClickHouse/ClickHouse/issues/42763). [#42905](https://github.com/ClickHouse/ClickHouse/pull/42905) ([Vladimir C](https://github.com/vdimir)).
* Improve DateTime schema inference in the case of an empty string. [#42911](https://github.com/ClickHouse/ClickHouse/pull/42911) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a rare NOT_FOUND_COLUMN_IN_BLOCK error when a projection could be used but no projection was available. This fixes [#42771](https://github.com/ClickHouse/ClickHouse/issues/42771). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/25563. [#42938](https://github.com/ClickHouse/ClickHouse/pull/42938) ([Amos Bird](https://github.com/amosbird)).
* Fix ATTACH TABLE in the `PostgreSQL` database engine if the table contains the DATETIME data type. Closes [#42817](https://github.com/ClickHouse/ClickHouse/issues/42817). [#42960](https://github.com/ClickHouse/ClickHouse/pull/42960) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix lambda parsing. Closes [#41848](https://github.com/ClickHouse/ClickHouse/issues/41848). [#42979](https://github.com/ClickHouse/ClickHouse/pull/42979) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect key analysis when nullable keys appear in the middle of a hyperrectangle. This fixes [#43111](https://github.com/ClickHouse/ClickHouse/issues/43111). [#43133](https://github.com/ClickHouse/ClickHouse/pull/43133) ([Amos Bird](https://github.com/amosbird)).
* Fix several buffer over-reads in the deserialization of carefully crafted aggregate function states. [#43159](https://github.com/ClickHouse/ClickHouse/pull/43159) ([Raúl Marín](https://github.com/Algunenano)).
* Fix the function `if` in the case of NULL and const Nullable arguments. Closes [#43069](https://github.com/ClickHouse/ClickHouse/issues/43069). [#43178](https://github.com/ClickHouse/ClickHouse/pull/43178) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a decimal math overflow in parsing DateTime with the 'best effort' algorithm. Closes [#43061](https://github.com/ClickHouse/ClickHouse/issues/43061). [#43180](https://github.com/ClickHouse/ClickHouse/pull/43180) ([Kruglov Pavel](https://github.com/Avogar)).
* The `indent` field produced by the `git-import` tool was miscalculated. See https://clickhouse.com/docs/en/getting-started/example-datasets/github/. [#43191](https://github.com/ClickHouse/ClickHouse/pull/43191) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed unexpected behaviour of `Interval` types with subquery and casting. [#43193](https://github.com/ClickHouse/ClickHouse/pull/43193) ([jh0x](https://github.com/jh0x)).

### <a id="2210"></a> ClickHouse release 22.10, 2022-10-26
|
||||
|
||||
#### Backward Incompatible Change
|
||||
@@ -570,7 +674,7 @@
* Support SQL standard CREATE INDEX and DROP INDEX syntax. [#35166](https://github.com/ClickHouse/ClickHouse/pull/35166) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* Send profile events for INSERT queries (previously only SELECT was supported). [#37391](https://github.com/ClickHouse/ClickHouse/pull/37391) ([Azat Khuzhin](https://github.com/azat)).
* Implement in order aggregation (`optimize_aggregation_in_order`) for fully materialized projections. [#37469](https://github.com/ClickHouse/ClickHouse/pull/37469) ([Azat Khuzhin](https://github.com/azat)).
* Remove subprocess run for kerberos initialization. Added new integration test. Closes [#27651](https://github.com/ClickHouse/ClickHouse/issues/27651). [#38105](https://github.com/ClickHouse/ClickHouse/pull/38105) ([Roman Vasin](https://github.com/rvasin)).
* Remove subprocess run for Kerberos initialization. Added new integration test. Closes [#27651](https://github.com/ClickHouse/ClickHouse/issues/27651). [#38105](https://github.com/ClickHouse/ClickHouse/pull/38105) ([Roman Vasin](https://github.com/rvasin)).
* Add setting `multiple_joins_try_to_keep_original_names` to not rewrite identifier name on multiple JOINs rewrite, close [#34697](https://github.com/ClickHouse/ClickHouse/issues/34697). [#38149](https://github.com/ClickHouse/ClickHouse/pull/38149) ([Vladimir C](https://github.com/vdimir)).
* Improved trace-visualizer UX. [#38169](https://github.com/ClickHouse/ClickHouse/pull/38169) ([Sergei Trifonov](https://github.com/serxa)).
* Enable stack trace collection and query profiler for AArch64. [#38181](https://github.com/ClickHouse/ClickHouse/pull/38181) ([Maksim Kita](https://github.com/kitaisreal)).
@@ -850,8 +954,8 @@

#### Upgrade Notes

* Now, background merges, mutations and `OPTIMIZE` will not increment `SelectedRows` and `SelectedBytes` metrics. They (still) will increment `MergedRows` and `MergedUncompressedBytes` as it was before. This only affects the metric values, and makes them better. This change does not introduce any incompatibility, but you may wonder about the changes of metrics, so we put in this category. [#37040](https://github.com/ClickHouse/ClickHouse/pull/37040) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Meena-Renganathan](https://github.com/Meena-Renganathan)). The ciphers `aes-192-cfb128` and `aes-256-cfb128` were removed, because they are not included in the FIPS certified version of BoringSSL.
* Now, background merges, mutations, and `OPTIMIZE` will not increment `SelectedRows` and `SelectedBytes` metrics. They (still) will increment `MergedRows` and `MergedUncompressedBytes` as it was before. This only affects the metric values and makes them better. This change does not introduce any incompatibility, but you may wonder about the changes to the metrics, so we put in this category. [#37040](https://github.com/ClickHouse/ClickHouse/pull/37040) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant in this area. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Meena-Renganathan](https://github.com/Meena-Renganathan)). The ciphers `aes-192-cfb128` and `aes-256-cfb128` were removed, because they are not included in the FIPS certified version of BoringSSL.
* `max_memory_usage` setting is removed from the default user profile in `users.xml`. This enables flexible memory limits for queries instead of the old rigid limit of 10 GB.
* Disable `log_query_threads` setting by default. It controls the logging of statistics about every thread participating in query execution. After supporting asynchronous reads, the total number of distinct thread ids became too large, and logging into the `query_thread_log` has become too heavy. [#37077](https://github.com/ClickHouse/ClickHouse/pull/37077) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove function `groupArraySorted` which has a bug. [#36822](https://github.com/ClickHouse/ClickHouse/pull/36822) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

@@ -2,11 +2,11 @@

# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54468)
SET(VERSION_REVISION 54469)
SET(VERSION_MAJOR 22)
SET(VERSION_MINOR 11)
SET(VERSION_MINOR 12)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 98ab5a3c189232ea2a3dddb9d2be7196ae8b3434)
SET(VERSION_DESCRIBE v22.11.1.1-testing)
SET(VERSION_STRING 22.11.1.1)
SET(VERSION_GITHASH 0d211ed19849fe44b0e43fdebe2c15d76d560a77)
SET(VERSION_DESCRIBE v22.12.1.1-testing)
SET(VERSION_STRING 22.12.1.1)
# end of autochange
@@ -16,7 +16,9 @@ endmacro()

if (SANITIZE)
    if (SANITIZE STREQUAL "address")
        set (ASAN_FLAGS "-fsanitize=address -fsanitize-address-use-after-scope")
        # LLVM-15 has a bug in Address Sanitizer, preventing the usage of 'sanitize-address-use-after-scope',
        # see https://github.com/llvm/llvm-project/issues/58633
        set (ASAN_FLAGS "-fsanitize=address -fno-sanitize-address-use-after-scope")
        set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${ASAN_FLAGS}")
        set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${ASAN_FLAGS}")
@@ -25,6 +25,7 @@ done
sed -i '/onBrokenMarkdownLinks:/ s/ignore/error/g' docusaurus.config.js

if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
    export CI=true
    exec yarn build "$@"
fi
@@ -80,6 +80,16 @@ RUN arch=${TARGETARCH:-amd64} \
    && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
    && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client

# Remove as much of Ubuntu as possible.
# ClickHouse does not need Ubuntu. It can run on top of Linux kernel without any OS distribution.
# ClickHouse does not need Docker at all. ClickHouse is above all that.
# It does not care about Ubuntu, Docker, or other cruft, and neither should you.
# The fact that this Docker image is based on Ubuntu is just a misconception.
# Some vulnerability scanners are arguing about Ubuntu, which is not relevant to ClickHouse at all.
# ClickHouse does not care when you report false vulnerabilities by running some Docker scanners.

RUN apt-get remove --purge -y libksba8 && apt-get autoremove -y

# we need to allow "others" access to the clickhouse folder, because the docker container
# can be started with an arbitrary uid (the openshift use case)
@@ -178,7 +178,7 @@ function fuzz
    # interferes with gdb
    export CLICKHOUSE_WATCHDOG_ENABLE=0
    # NOTE: we use process substitution here to preserve $! as the pid of clickhouse-server
    clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > >(tail -100000 > server.log) 2>&1 &
    clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>&1 | pigz > server.log.gz &
    server_pid=$!

    kill -0 $server_pid
@@ -297,7 +297,7 @@ quit
    # The server has died.
    task_exit_code=210
    echo "failure" > status.txt
    if ! grep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt
    if ! zgrep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log.gz > description.txt
    then
        echo "Lost connection to server. See the logs." > description.txt
    fi
@@ -391,8 +391,9 @@ th { cursor: pointer; }

<h1>AST Fuzzer for PR #${PR_TO_TEST} @ ${SHA_TO_TEST}</h1>
<p class="links">
<a href="runlog.log">runlog.log</a>
<a href="fuzzer.log">fuzzer.log</a>
<a href="server.log">server.log</a>
<a href="server.log.gz">server.log.gz</a>
<a href="main.log">main.log</a>
${CORE_LINK}
</p>
@@ -15,8 +15,8 @@ if [ -z "$CLICKHOUSE_REPO_PATH" ]; then
    ls -lath ||:
fi

cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse-keeper"
cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse"

(lein run test-all --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 -r 50 --snapshot-distance 100 --stale-log-gap 100 --reserved-log-items 10 --lightweight-run --clickhouse-source "$CLICKHOUSE_PACKAGE" -q --test-count "$TESTS_TO_RUN" || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"
(lein run keeper test-all --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 -r 50 --snapshot-distance 100 --stale-log-gap 100 --reserved-log-items 10 --lightweight-run --clickhouse-source "$CLICKHOUSE_PACKAGE" -q --test-count "$TESTS_TO_RUN" || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"

mv store "$TEST_OUTPUT/"
docker/test/server-jepsen/Dockerfile (new file): 43 lines
@@ -0,0 +1,43 @@
# rebuild in #33610
# docker build -t clickhouse/server-jepsen-test .
ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG

ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814

# arguments
ENV PR_TO_TEST=""
ENV SHA_TO_TEST=""

ENV NODES_USERNAME="root"
ENV NODES_PASSWORD=""
ENV TESTS_TO_RUN="8"
ENV TIME_LIMIT="30"

ENV KEEPER_NODE=""

# volumes
ENV NODES_FILE_PATH="/nodes.txt"
ENV TEST_OUTPUT="/test_output"

RUN mkdir "/root/.ssh"
RUN touch "/root/.ssh/known_hosts"

# install java
RUN apt-get update && apt-get install default-jre default-jdk libjna-java libjna-jni ssh gnuplot graphviz --yes --no-install-recommends

# install clojure
RUN curl -O "https://download.clojure.org/install/linux-install-${CLOJURE_VERSION}.sh" && \
    chmod +x "linux-install-${CLOJURE_VERSION}.sh" && \
    bash "./linux-install-${CLOJURE_VERSION}.sh"

# install leiningen
RUN curl -O "https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein" && \
    chmod +x ./lein && \
    mv ./lein /usr/bin

COPY run.sh /

CMD ["/bin/bash", "/run.sh"]
docker/test/server-jepsen/run.sh (new file): 22 lines
@@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -euo pipefail

CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-15_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}

if [ -z "$CLICKHOUSE_REPO_PATH" ]; then
    CLICKHOUSE_REPO_PATH=ch
    rm -rf ch ||:
    mkdir ch ||:
    wget -nv -nd -c "https://clickhouse-test-reports.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz"
    tar -C ch --strip-components=1 -xf clickhouse_no_subs.tar.gz
    ls -lath ||:
fi

cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse"

(lein run server test-all --keeper "$KEEPER_NODE" --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 -r 50 --clickhouse-source "$CLICKHOUSE_PACKAGE" --test-count "$TESTS_TO_RUN" || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"

mv store "$TEST_OUTPUT/"
@@ -388,6 +388,9 @@ else
    rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
    rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:

    # it uses recently introduced settings which previous versions may not have
    rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:

    start

    clickhouse-client --query="SELECT 'Server version: ', version()"
@@ -481,6 +484,7 @@ else
        -e "The set of parts restored in place of" \
        -e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
        -e "Code: 269. DB::Exception: Destination table is myself" \
        -e "Coordination::Exception: Connection loss" \
        /var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
        && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
        || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
@@ -6,7 +6,7 @@ sidebar_label: Integrations

# Table Engines for Integrations

ClickHouse provides various means for integrating with external systems, including table engines. Like with all other table engines, the configuration is done using `CREATE TABLE` or `ALTER TABLE` queries. Then from a user perspective, the configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods, like external dictionaries or table functions, which require to use custom query methods on each use.
ClickHouse provides various means for integrating with external systems, including table engines. Like with all other table engines, the configuration is done using `CREATE TABLE` or `ALTER TABLE` queries. Then from a user perspective, the configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods, like dictionaries or table functions, which require to use custom query methods on each use.

List of supported integrations:
@@ -180,6 +180,6 @@ Default value: `300`.
## See Also {#see-also}

- [The mysql table function](../../../sql-reference/table-functions/mysql.md)
- [Using MySQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)
- [Using MySQL as a dictionary source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/mysql/) <!--hide-->
@@ -126,7 +126,7 @@ SELECT * FROM odbc_t

## See Also {#see-also}

- [ODBC external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
- [ODBC dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
- [ODBC table function](../../../sql-reference/table-functions/odbc.md)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/odbc/) <!--hide-->
@@ -174,6 +174,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)
**See Also**

- [The `postgresql` table function](../../../sql-reference/table-functions/postgresql.md)
- [Using PostgreSQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
- [Using PostgreSQL as a dictionary source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/postgresql/) <!--hide-->
@@ -28,7 +28,7 @@ Engines:

During `INSERT` queries, the table is locked, and other queries for reading and writing data both wait for the table to unlock. If there are no data writing queries, any number of data reading queries can be performed concurrently.

- Do not support [mutations](/docs/en/sql-reference/statements/alter/index.md/#alter-mutations).
- Do not support [mutations](/docs/en/sql-reference/statements/alter/index.md#alter-mutations).

- Do not support indexes.
@@ -537,7 +537,7 @@ TTL time_column
TTL time_column + interval
```

To define `interval`, use [time interval](/docs/en/sql-reference/operators/index.md/#operators-datetime) operators, for example:
To define `interval`, use [time interval](/docs/en/sql-reference/operators/index.md#operators-datetime) operators, for example:

``` sql
TTL date_time + INTERVAL 1 MONTH
@@ -860,7 +860,7 @@ The number of threads performing background moves of data parts can be changed b
In the case of `MergeTree` tables, data is getting to disk in different ways:

- As a result of an insert (`INSERT` query).
- During background merges and [mutations](/docs/en/sql-reference/statements/alter/index.md/#alter-mutations).
- During background merges and [mutations](/docs/en/sql-reference/statements/alter/index.md#alter-mutations).
- When downloading from another replica.
- As a result of partition freezing [ALTER TABLE … FREEZE PARTITION](/docs/en/sql-reference/statements/alter/partition.md/#alter_freeze-partition).
@@ -20,7 +20,7 @@ Replication works at the level of an individual table, not the entire server. A

Replication does not depend on sharding. Each shard has its own independent replication.

Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](/docs/en/sql-reference/statements/alter/index.md/#query_language_queries_alter)).
Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](/docs/en/sql-reference/statements/alter/index.md#query_language_queries_alter)).

`CREATE`, `DROP`, `ATTACH`, `DETACH` and `RENAME` queries are executed on a single server and are not replicated:
@@ -59,7 +59,7 @@ Main use-cases for `Join`-engine tables are following:

### Deleting Data {#deleting-data}

`ALTER DELETE` queries for `Join`-engine tables are implemented as [mutations](/docs/en/sql-reference/statements/alter/index.md/#mutations). `DELETE` mutation reads filtered data and overwrites data of memory and disk.
`ALTER DELETE` queries for `Join`-engine tables are implemented as [mutations](/docs/en/sql-reference/statements/alter/index.md#mutations). `DELETE` mutation reads filtered data and overwrites data of memory and disk.

### Limitations and Settings {#join-limitations-and-settings}
@@ -163,7 +163,7 @@ SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10

Based on the above query and the [MCC list](https://en.wikipedia.org/wiki/Mobile_country_code), the countries with the most cell towers are: the USA, Germany, and Russia.

You may want to create an [External Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) in ClickHouse to decode these values.
You may want to create a [Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) in ClickHouse to decode these values.

## Use case: Incorporate geo data {#use-case}
docs/en/getting-started/example-datasets/github.md (new file): 2499 lines — file diff suppressed because it is too large.
Four binary image files added (not shown): 277 KiB, 315 KiB, 246 KiB, 69 KiB.
@@ -5,7 +5,7 @@ sidebar_label: Input and Output Formats
title: Formats for Input and Output Data
---

ClickHouse can accept and return data in various formats. A format supported for input can be used to parse the data provided to `INSERT`s, to perform `SELECT`s from a file-backed table such as File, URL or HDFS, or to read an external dictionary. A format supported for output can be used to arrange the
ClickHouse can accept and return data in various formats. A format supported for input can be used to parse the data provided to `INSERT`s, to perform `SELECT`s from a file-backed table such as File, URL or HDFS, or to read a dictionary. A format supported for output can be used to arrange the
results of a `SELECT`, and to perform `INSERT`s into a file-backed table.

The supported formats are:
@@ -130,7 +130,7 @@ SHOW TABLES FROM mydatabase;
└────────┘
```

### Example of using named collections with an external dictionary with source MySQL
### Example of using named collections with a dictionary with source MySQL

```sql
CREATE DICTIONARY dict (A Int64, B String)
@@ -213,7 +213,7 @@ SHOW TABLES FROM mydatabase
└──────┘
```

### Example of using named collections with an external dictionary with source POSTGRESQL
### Example of using named collections with a dictionary with source POSTGRESQL

```sql
CREATE DICTIONARY dict (a Int64, b String)
@@ -270,7 +270,7 @@ SELECT * FROM remote(remote1, database = default, table = test);
└───┴───┘
```

### Example of using named collections with an external dictionary with source ClickHouse
### Example of using named collections with a dictionary with source ClickHouse

```sql
CREATE DICTIONARY dict(a Int64, b String)
@@ -268,14 +268,14 @@ The path to the table in ZooKeeper.

## dictionaries_config {#server_configuration_parameters-dictionaries_config}

The path to the config file for external dictionaries.
The path to the config file for dictionaries.

Path:

- Specify the absolute path or the path relative to the server config file.
- The path can contain wildcards \* and ?.

See also “[External dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)”.
See also “[Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)”.

**Example**
@@ -26,7 +26,7 @@ Ways to configure settings, in order of priority:

- When starting the ClickHouse console client in non-interactive mode, set the startup parameter `--setting=value`.
- When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).
- Make settings in the [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select) clause of the SELECT query. The setting value is applied only to that query and is reset to default or previous value after the query is executed.
- Make settings in the [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select-query) clause of the SELECT query. The setting value is applied only to that query and is reset to default or previous value after the query is executed.

Settings that can only be made in the server config file are not covered in this section.
@@ -16,44 +16,54 @@ Queries in ClickHouse can be divided into several types:

The following settings regulate user permissions by the type of query:

- [readonly](#settings_readonly) — Restricts permissions for all types of queries except DDL queries.
- [allow_ddl](#settings_allow_ddl) — Restricts permissions for DDL queries.
## readonly
Restricts permissions for read data, write data, and change settings queries.

`KILL QUERY` can be performed with any settings.
When set to 1, allows:

## readonly {#settings_readonly}
- All types of read queries (like SELECT and equivalent queries).
- Queries that modify only session context (like USE).

Restricts permissions for reading data, write data and change settings queries.
When set to 2, allows the above plus:
- SET and CREATE TEMPORARY TABLE

See how the queries are divided into types [above](#permissions_for_queries).
:::tip
Queries like EXISTS, DESCRIBE, EXPLAIN, SHOW PROCESSLIST, etc are equivalent to SELECT, because they just do select from system tables.
:::

Possible values:

- 0 — All queries are allowed.
- 1 — Only read data queries are allowed.
- 2 — Read data and change settings queries are allowed.
- 0 — Read, Write, and Change settings queries are allowed.
- 1 — Only Read data queries are allowed.
- 2 — Read data and Change settings queries are allowed.

Default value: 0

:::note
After setting `readonly = 1`, the user can’t change `readonly` and `allow_ddl` settings in the current session.

When using the `GET` method in the [HTTP interface](../../interfaces/http.md), `readonly = 1` is set automatically. To modify data, use the `POST` method.

Setting `readonly = 1` prohibit the user from changing all the settings. There is a way to prohibit the user from changing only specific settings. Also there is a way to allow changing only specific settings under `readonly = 1` restrictions. For details see [constraints on settings](../../operations/settings/constraints-on-settings.md).
Setting `readonly = 1` prohibits the user from changing settings. There is a way to prohibit the user from changing only specific settings. Also there is a way to allow changing only specific settings under `readonly = 1` restrictions. For details see [constraints on settings](../../operations/settings/constraints-on-settings.md).
:::

Default value: 0

## allow_ddl {#settings_allow_ddl}

Allows or denies [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries.

See how the queries are divided into types [above](#permissions_for_queries).

Possible values:

- 0 — DDL queries are not allowed.
- 1 — DDL queries are allowed.

You can’t execute `SET allow_ddl = 1` if `allow_ddl = 0` for the current session.

Default value: 1

[Original article](https://clickhouse.com/docs/en/operations/settings/permissions_for_queries/) <!--hide-->
:::note
You cannot run `SET allow_ddl = 1` if `allow_ddl = 0` for the current session.
:::

:::note KILL QUERY
`KILL QUERY` can be performed with any combination of readonly and allow_ddl settings.
:::
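A sketch of the `readonly` behavior described above, within a single session (exact error messages omitted):

```sql
SET readonly = 1;
SELECT version();     -- read queries are still allowed
SET max_threads = 4;  -- rejected: readonly = 1 forbids changing settings
-- Note: with readonly = 1 the session cannot set readonly back to 0 either.
```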
@@ -276,7 +276,7 @@ Default value: 0.
Enables or disables the insertion of [default values](../../sql-reference/statements/create/table.md/#create-default-values) instead of [NULL](../../sql-reference/syntax.md/#null-literal) into columns with not [nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable) data type.
If column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If column type is nullable, then `NULL` values are inserted as is, regardless of this setting.

This setting is applicable to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md/#insert_query_insert-select) queries. Note that `SELECT` subqueries may be concatenated with `UNION ALL` clause.
This setting is applicable to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md/#inserting-the-results-of-select) queries. Note that `SELECT` subqueries may be concatenated with `UNION ALL` clause.

Possible values:
@ -1619,8 +1619,8 @@ These functions can be transformed:
- [length](../../sql-reference/functions/array-functions.md/#array_functions-length) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn.
- [empty](../../sql-reference/functions/array-functions.md/#function-empty) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn.
- [notEmpty](../../sql-reference/functions/array-functions.md/#function-notempty) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn.
- [isNull](../../sql-reference/operators/index.md/#operator-is-null) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
- [isNotNull](../../sql-reference/operators/index.md/#is-not-null) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
- [isNull](../../sql-reference/operators/index.md#operator-is-null) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
- [isNotNull](../../sql-reference/operators/index.md#is-not-null) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
- [count](../../sql-reference/aggregate-functions/reference/count.md) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
- [mapKeys](../../sql-reference/functions/tuple-map-functions.md/#mapkeys) to read the [keys](../../sql-reference/data-types/map.md/#map-subcolumns) subcolumn.
- [mapValues](../../sql-reference/functions/tuple-map-functions.md/#mapvalues) to read the [values](../../sql-reference/data-types/map.md/#map-subcolumns) subcolumn.
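
For context, a sketch of the optimization these entries describe (the setting is `optimize_functions_to_subcolumns`; the table is illustrative):

``` sql
CREATE TABLE t (arr Array(UInt8)) ENGINE = MergeTree ORDER BY tuple();

SET optimize_functions_to_subcolumns = 1;
-- Reads only the compact `arr.size0` subcolumn instead of the whole array column:
SELECT length(arr) FROM t;
```
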
@ -2041,7 +2041,7 @@ Default value: 16.

## validate_polygons {#validate_polygons}

Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo/index.md/#pointinpolygon) function, if the polygon is self-intersecting or self-tangent.
Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo/index.md#pointinpolygon) function, if the polygon is self-intersecting or self-tangent.

Possible values:
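
A sketch of the effect (the self-intersecting “bowtie” polygon below would throw with validation enabled):

``` sql
SET validate_polygons = 0;
-- No exception is thrown; the result for points inside the overlap is implementation-defined:
SELECT pointInPolygon((0.5, 0.5), [(0., 0.), (1., 1.), (1., 0.), (0., 1.)]);
```
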
@ -2227,7 +2227,7 @@ Default value: `0`.

## mutations_sync {#mutations_sync}

Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md/#mutations)) synchronously.
Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.

Possible values:
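
A minimal sketch (`hits` is an illustrative table):

``` sql
SET mutations_sync = 2; -- wait until the mutation completes on all replicas

ALTER TABLE hits DELETE WHERE event_date < '2020-01-01';
-- the statement returns only after the mutation has finished everywhere
```
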
@ -2239,8 +2239,8 @@ Default value: `0`.

**See Also**

- [Synchronicity of ALTER Queries](../../sql-reference/statements/alter/index.md/#synchronicity-of-alter-queries)
- [Mutations](../../sql-reference/statements/alter/index.md/#mutations)
- [Synchronicity of ALTER Queries](../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries)
- [Mutations](../../sql-reference/statements/alter/index.md#mutations)

## ttl_only_drop_parts {#ttl_only_drop_parts}
@ -3399,6 +3399,17 @@ Use schema from cache for URL with last modification time validation (for urls w

Default value: `true`.

## use_structure_from_insertion_table_in_table_functions {#use_structure_from_insertion_table_in_table_functions}

Use structure from insertion table instead of schema inference from data.

Possible values:
- 0 - disabled
- 1 - enabled
- 2 - auto

Default value: 2.
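
A minimal sketch of what this controls (the file name and table are illustrative):

``` sql
CREATE TABLE dest (x UInt32, s String) ENGINE = Memory;

SET use_structure_from_insertion_table_in_table_functions = 1;
-- `file()` takes its structure from `dest` instead of inferring it from the data:
INSERT INTO dest SELECT * FROM file('data.tsv', 'TSV');
```
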
## compatibility {#compatibility}

This setting changes other settings according to the provided ClickHouse version.
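
For example (the version value is illustrative):

``` sql
-- Make settings defaults behave as they did in an older release:
SET compatibility = '22.3';
```
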
@ -7,8 +7,8 @@ Contains information about stack traces for fatal errors. The table does not exi

Columns:

- `event_date` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date of the event.
- `event_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Time of the event.
- `event_date` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date of the event.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the event.
- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Timestamp of the event with nanoseconds.
- `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — Signal number.
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread ID.

@ -3,7 +3,7 @@ slug: /en/operations/system-tables/dictionaries
---
# dictionaries

Contains information about [external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
Contains information about [dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).

Columns:

@ -33,7 +33,7 @@ Columns:
- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Minimum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Maximum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
- `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time for loading the dictionary.
- `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes.
- `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with dictionary sources and investigate the causes.
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Duration of a dictionary loading.
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created.
- `comment` ([String](../../sql-reference/data-types/string.md)) — Text of the comment to dictionary.

@ -3,7 +3,7 @@ slug: /en/operations/system-tables/mutations
---
# mutations

The table contains information about [mutations](/docs/en/sql-reference/statements/alter/index.md/#mutations) of [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row.
The table contains information about [mutations](/docs/en/sql-reference/statements/alter/index.md#mutations) of [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row.

Columns:

@ -15,7 +15,7 @@ Columns:

- `command` ([String](/docs/en/sql-reference/data-types/string.md)) — The mutation command string (the part of the query after `ALTER TABLE [db.]table`).

- `create_time` ([Datetime](/docs/en/sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.
- `create_time` ([DateTime](/docs/en/sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.

- `block_numbers.partition_id` ([Array](/docs/en/sql-reference/data-types/array.md)([String](/docs/en/sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty.

@ -39,13 +39,13 @@ If there were problems with mutating some data parts, the following columns cont

- `latest_failed_part` ([String](/docs/en/sql-reference/data-types/string.md)) — The name of the most recent part that could not be mutated.

- `latest_fail_time` ([Datetime](/docs/en/sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure.
- `latest_fail_time` ([DateTime](/docs/en/sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure.

- `latest_fail_reason` ([String](/docs/en/sql-reference/data-types/string.md)) — The exception message that caused the most recent part mutation failure.

**See Also**

- [Mutations](/docs/en/sql-reference/statements/alter/index.md/#mutations)
- [Mutations](/docs/en/sql-reference/statements/alter/index.md#mutations)
- [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) table engine
- [ReplicatedMergeTree](/docs/en/engines/table-engines/mergetree-family/replication.md) family

@ -9,7 +9,7 @@ Each row describes one data part.

Columns:

- `partition` ([String](../../sql-reference/data-types/string.md)) – The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md/#query_language_queries_alter) query.
- `partition` ([String](../../sql-reference/data-types/string.md)) – The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md#query_language_queries_alter) query.

Formats:

@ -9,7 +9,7 @@ Each row describes one data part.

Columns:

- `partition` ([String](../../sql-reference/data-types/string.md)) — The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md/#query_language_queries_alter) query.
- `partition` ([String](../../sql-reference/data-types/string.md)) — The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md#query_language_queries_alter) query.

Formats:

@ -29,7 +29,7 @@ Columns:
- `MUTATE_PART` — Apply one or several mutations to the part.
- `ALTER_METADATA` — Apply alter modification according to global /metadata and /columns paths.

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.

- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of replicas waiting for the task to complete with confirmation of completion. This column is only relevant for the `GET_PARTS` task.

@ -47,13 +47,13 @@ Columns:

- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).

- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last attempted.
- `last_attempt_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last attempted.

- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of postponed tasks.

- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — The reason why the task was postponed.

- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.
- `last_postpone_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.

- `merge_type` ([String](../../sql-reference/data-types/string.md)) — Type of the current merge. Empty if it's a mutation.
@ -6,7 +6,7 @@ sidebar_label: Date32

# Date32

A date. Supports the date range same with [Datetime64](../../sql-reference/data-types/datetime64.md). Stored in four bytes as the number of days since 1900-01-01. Allows storing values till 2299-12-31.
A date. Supports the same date range as [DateTime64](../../sql-reference/data-types/datetime64.md). Stored as a signed 32-bit integer in native byte order with the value representing the days since 1970-01-01 (0 represents 1970-01-01 and negative values represent the days before 1970).

**Examples**
@ -4,7 +4,7 @@ sidebar_position: 48
sidebar_label: DateTime
---

# Datetime
# DateTime

Allows storing an instant in time that can be expressed as a calendar date and a time of day.

@ -4,7 +4,7 @@ sidebar_position: 49
sidebar_label: DateTime64
---

# Datetime64
# DateTime64

Allows storing an instant in time that can be expressed as a calendar date and a time of day, with defined sub-second precision.
@ -1,8 +1,8 @@
position: 37
label: 'External Dictionaries'
label: 'Dictionaries'
collapsible: true
collapsed: true
link:
  type: generated-index
  title: External Dictionaries
  title: Dictionaries
slug: /en/sql-reference/dictionaries/external-dictionaries

@ -0,0 +1,4 @@
:::tip
If you are using a dictionary with ClickHouse Cloud, please use the DDL query option to create your dictionaries, and create your dictionary as user `default`.
Also, verify the list of supported dictionary sources in the [Cloud Compatibility guide](/docs/en/whats-new/cloud-capabilities.md).
:::
@ -3,6 +3,7 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-l
sidebar_position: 41
sidebar_label: Storing Dictionaries in Memory
---
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

# Storing Dictionaries in Memory

@ -22,7 +23,9 @@ ClickHouse generates an exception for errors with dictionaries. Examples of erro
- The dictionary being accessed could not be loaded.
- Error querying a `cached` dictionary.

You can view the list of external dictionaries and their statuses in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table.
You can view the list of dictionaries and their statuses in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table.

<CloudDetails />

The configuration looks like this:
@ -3,6 +3,7 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-l
sidebar_position: 42
sidebar_label: Dictionary Updates
---
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

# Dictionary Updates

@ -12,6 +13,8 @@ Dictionary updates (other than loading for first use) do not block queries. Duri

Example of settings:

<CloudDetails />

``` xml
<dictionary>
    ...
@ -4,12 +4,15 @@ sidebar_position: 46
sidebar_label: Polygon Dictionaries With Grids
title: "Polygon dictionaries"
---
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

Polygon dictionaries allow you to efficiently search for the polygon containing specified points.
For example: defining a city area by geographical coordinates.

Example of a polygon dictionary configuration:

<CloudDetails />

``` xml
<dictionary>
    <structure>

@ -78,7 +81,7 @@ To respond to the query, there is a corresponding cell, and the index for the po

- `POLYGON`. Synonym to `POLYGON_INDEX_CELL`.

Dictionary queries are carried out using standard [functions](../../../sql-reference/functions/ext-dict-functions.md) for working with external dictionaries.
Dictionary queries are carried out using standard [functions](../../../sql-reference/functions/ext-dict-functions.md) for working with dictionaries.
An important difference is that here the keys will be the points for which you want to find the polygon containing them.

**Example**
@ -1,12 +1,15 @@
---
slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources
sidebar_position: 43
sidebar_label: Sources of External Dictionaries
sidebar_label: Dictionary Sources
---
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

# Sources of External Dictionaries
# Dictionary Sources

An external dictionary can be connected to ClickHouse from many different sources.
<CloudDetails />

A dictionary can be connected to ClickHouse from many different sources.

If the dictionary is configured using an xml-file, the configuration looks like this:

@ -65,13 +68,13 @@ Types of sources (`source_type`):
- [Executable Pool](#dicts-external_dicts_dict_sources-executable_pool)
- [HTTP(s)](#dicts-external_dicts_dict_sources-http)
- DBMS
    - [ODBC](#dicts-external_dicts_dict_sources-odbc)
    - [MySQL](#dicts-external_dicts_dict_sources-mysql)
    - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse)
    - [MongoDB](#dicts-external_dicts_dict_sources-mongodb)
    - [Redis](#dicts-external_dicts_dict_sources-redis)
    - [Cassandra](#dicts-external_dicts_dict_sources-cassandra)
    - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql)
    - [ODBC](#odbc)
    - [MySQL](#mysql)
    - [ClickHouse](#clickhouse)
    - [MongoDB](#mongodb)
    - [Redis](#redis)
    - [Cassandra](#cassandra)
    - [PostgreSQL](#postgresql)

## Local File
@ -3,9 +3,12 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-s
sidebar_position: 44
sidebar_label: Dictionary Key and Fields
---
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

# Dictionary Key and Fields

<CloudDetails />

The `structure` clause describes the dictionary key and fields available for queries.

XML description:

@ -171,5 +174,5 @@ Configuration fields:

**See Also**

- [Functions for working with external dictionaries](../../../sql-reference/functions/ext-dict-functions.md).
- [Functions for working with dictionaries](../../../sql-reference/functions/ext-dict-functions.md).
@ -1,10 +1,13 @@
---
slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict
sidebar_position: 40
sidebar_label: Configuring an External Dictionary
sidebar_label: Configuring a Dictionary
---
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

# Configuring an External Dictionary
# Configuring a Dictionary

<CloudDetails />

If a dictionary is configured using an xml file, then the dictionary configuration has the following structure:
@ -3,18 +3,23 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts
sidebar_position: 39
sidebar_label: General Description
---
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';

# External Dictionaries
# Dictionaries

You can add your own dictionaries from various data sources. The data source for a dictionary can be a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Sources for external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)”.
:::tip Tutorial
If you are getting started with Dictionaries in ClickHouse we have a tutorial that covers that topic. Take a look [here](/docs/en/tutorial.md).
:::

You can add your own dictionaries from various data sources. The source for a dictionary can be a ClickHouse table, a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Dictionary Sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)”.

ClickHouse:

- Fully or partially stores dictionaries in RAM.
- Periodically updates dictionaries and dynamically loads missing values. In other words, dictionaries can be loaded dynamically.
- Allows to create external dictionaries with xml files or [DDL queries](../../../sql-reference/statements/create/dictionary.md).
- Allows creating dictionaries with xml files or [DDL queries](../../../sql-reference/statements/create/dictionary.md).

The configuration of external dictionaries can be located in one or more xml-files. The path to the configuration is specified in the [dictionaries_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config) parameter.
The configuration of dictionaries can be located in one or more xml-files. The path to the configuration is specified in the [dictionaries_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config) parameter.

Dictionaries can be loaded at server startup or at first use, depending on the [dictionaries_lazy_load](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) setting.

@ -24,6 +29,22 @@ The [dictionaries](../../../operations/system-tables/dictionaries.md#system_tabl
- Configuration parameters.
- Metrics like amount of RAM allocated for the dictionary or a number of queries since the dictionary was successfully loaded.

<CloudDetails />

## Creating a dictionary with a DDL query

Dictionaries can be created with [DDL queries](../../../sql-reference/statements/create/dictionary.md), and this is the recommended method because with DDL created dictionaries:
- No additional records are added to server configuration files
- The dictionaries can be worked with as first-class entities, like tables or views (see the sketch below)
- Data can be read directly, using familiar SELECT rather than dictionary table functions
- The dictionaries can be easily renamed
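
A brief sketch of those last points (`my_dictionary` is an illustrative name for any DDL-created dictionary):

``` sql
-- Read a DDL-created dictionary directly, like a table:
SELECT * FROM my_dictionary LIMIT 5;

-- The usual dictionary functions keep working as well:
SELECT dictGet('my_dictionary', 'value', toUInt64(1));
```
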
## Creating a dictionary with a configuration file

:::note
Creating a dictionary with a configuration file is not applicable to ClickHouse Cloud. Please use DDL (see above), and create your dictionary as user `default`.
:::

The dictionary configuration file has the following format:

``` xml

@ -44,18 +65,17 @@ The dictionary configuration file has the following format:

You can [configure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) any number of dictionaries in the same file.

[DDL queries for dictionaries](../../../sql-reference/statements/create/dictionary.md) does not require any additional records in server configuration. They allow to work with dictionaries as first-class entities, like tables or views.

:::note
You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../../sql-reference/functions/other-functions.md) function). This functionality is not related to external dictionaries.
You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../../sql-reference/functions/other-functions.md) function). This functionality is not related to dictionaries.
:::

## See Also

- [Configuring an External Dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md)
- [Configuring a Dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md)
- [Storing Dictionaries in Memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md)
- [Dictionary Updates](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md)
- [Sources of External Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)
- [Dictionary Sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)
- [Dictionary Key and Fields](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md)
- [Functions for Working with External Dictionaries](../../../sql-reference/functions/ext-dict-functions.md)
- [Functions for Working with Dictionaries](../../../sql-reference/functions/ext-dict-functions.md)
@ -12,6 +12,6 @@ ClickHouse supports special functions for working with dictionaries that can be

ClickHouse supports:

- [Built-in dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md).
- [Plug-in (external) dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md#dicts-external-dicts) with a [set of functions](../../sql-reference/functions/ext-dict-functions.md).
- [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md#dicts-external-dicts) with a [set of functions](../../sql-reference/functions/ext-dict-functions.md).
- [Embedded dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md).
@ -1,10 +1,13 @@
---
slug: /en/sql-reference/dictionaries/internal-dicts
sidebar_position: 39
sidebar_label: Internal Dictionaries
sidebar_label: Embedded Dictionaries
---
import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.md';

# Internal Dictionaries
# Embedded Dictionaries

<SelfManaged />

ClickHouse contains a built-in feature for working with a geobase.
@ -65,6 +65,11 @@ An exception is thrown when dividing by zero or when dividing a minimal negative

Differs from [modulo](#modulo) in that it returns zero when the divisor is zero.

## positive_modulo(a, b)
Calculates the remainder when dividing `a` by `b`. Similar to the function `modulo`, except that `positive_modulo` always returns a non-negative number.

Notice that `positive_modulo` is 4-5 times slower than `modulo`. You should not use `positive_modulo` unless you need a non-negative result and performance is not critical.
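
A quick sketch of the difference:

``` sql
SELECT modulo(-5, 3), positive_modulo(-5, 3);
-- ┌─modulo(-5, 3)─┬─positive_modulo(-5, 3)─┐
-- │            -2 │                      1 │
-- └───────────────┴────────────────────────┘
```
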
## negate(a), -a operator

Calculates a number with the reverse sign. The result is always signed.
@ -550,7 +550,7 @@ Alias: `dateTrunc`.

- Value, truncated to the specified part of date.

Type: [Datetime](../../sql-reference/data-types/datetime.md).
Type: [DateTime](../../sql-reference/data-types/datetime.md).

**Example**

@ -881,7 +881,7 @@ now([timezone])

- Current date and time.

Type: [Datetime](../../sql-reference/data-types/datetime.md).
Type: [DateTime](../../sql-reference/data-types/datetime.md).

**Example**

@ -932,7 +932,7 @@ now64([scale], [timezone])

- Current date and time with sub-second precision.

Type: [Datetime64](../../sql-reference/data-types/datetime64.md).
Type: [DateTime64](../../sql-reference/data-types/datetime64.md).

**Example**

@ -968,7 +968,7 @@ nowInBlock([timezone])

- Current date and time at the moment of processing of each block of data.

Type: [Datetime](../../sql-reference/data-types/datetime.md).
Type: [DateTime](../../sql-reference/data-types/datetime.md).

**Example**
@ -185,7 +185,7 @@ unhex(arg)

**Arguments**

- `arg` — A string containing any number of hexadecimal digits. Type: [String](../../sql-reference/data-types/string.md).
- `arg` — A string containing any number of hexadecimal digits. Type: [String](../../sql-reference/data-types/string.md), [FixedString](../../sql-reference/data-types/fixedstring.md).

Supports both uppercase and lowercase letters `A-F`. The number of hexadecimal digits does not have to be even. If it is odd, the last digit is interpreted as the least significant half of the `00-0F` byte. If the argument string contains anything other than hexadecimal digits, some implementation-defined result is returned (an exception isn’t thrown). For a numeric argument the inverse of hex(N) is not performed by unhex().
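
A small sketch of the even-digit case:

``` sql
SELECT unhex('4F4B') AS s; -- 'OK' (0x4F = 'O', 0x4B = 'K')
```
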
@ -1,20 +1,20 @@
---
slug: /en/sql-reference/functions/ext-dict-functions
sidebar_position: 58
sidebar_label: External Dictionaries
sidebar_label: Dictionaries
---

# Functions for Working with Dictionaries

:::note
For dictionaries created with [DDL queries](../../sql-reference/statements/create/dictionary.md), the `dict_name` parameter must be fully specified, like `<database>.<dict_name>`. Otherwise, the current database is used.
:::

# Functions for Working with External Dictionaries

For information on connecting and configuring external dictionaries, see [External dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
For information on connecting and configuring dictionaries, see [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).

## dictGet, dictGetOrDefault, dictGetOrNull

Retrieves values from an external dictionary.
Retrieves values from a dictionary.

``` sql
dictGet('dict_name', attr_names, id_expr)
@ -52,7 +52,7 @@ Create a text file `ext-dict-test.csv` containing the following:

The first column is `id`, the second column is `c1`.

Configure the external dictionary:
Configure the dictionary:

``` xml
<clickhouse>
@ -112,7 +112,7 @@ Create a text file `ext-dict-mult.csv` containing the following:

The first column is `id`, the second is `c1`, the third is `c2`.

Configure the external dictionary:
Configure the dictionary:

``` xml
<clickhouse>
@ -185,7 +185,7 @@ INSERT INTO range_key_dictionary_source_table VALUES(2, toDate('2019-05-20'), to
INSERT INTO range_key_dictionary_source_table VALUES(3, toDate('2019-05-20'), toDate('2019-05-20'), 'Third', 'Third');
```

Create the external dictionary:
Create the dictionary:

```sql
CREATE DICTIONARY range_key_dictionary
@ -226,7 +226,7 @@ Result:

**See Also**

- [External Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)
- [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)

## dictHas
@ -549,3 +549,33 @@ Result:
│ 3.141592653589793 │
└───────────────────┘
```


## factorial(n)

Computes the factorial of an integer value. It works with any native integer type including UInt(8|16|32|64) and Int(8|16|32|64). The return type is UInt64.

The factorial of 0 is 1. Likewise, the factorial() function returns 1 for any negative value. The maximum positive value for the input argument is 20; a value of 21 or greater will cause an exception to be thrown.


**Syntax**

``` sql
factorial(n)
```

**Example**

Query:

``` sql
SELECT factorial(10);
```

Result:

``` text
┌─factorial(10)─┐
│       3628800 │
└───────────────┘
```
@ -24,6 +24,11 @@ Returns a pseudo-random UInt64 number, evenly distributed among all UInt64-type

Uses a linear congruential generator.

## randCanonical
The function generates pseudo-random results that are independent and identically distributed, uniform in [0, 1).

Non-deterministic. Return type is Float64.
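
For instance (the output value is of course random):

``` sql
SELECT randCanonical(); -- e.g. 0.3452178901234567, uniform in [0, 1)
```
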
## randConstant

Produces a constant column with a random value.
@ -6,21 +6,22 @@ sidebar_label: Splitting and Merging Strings and Arrays

# Functions for Splitting and Merging Strings and Arrays

## splitByChar(separator, s)
## splitByChar(separator, s[, max_substrings])

Splits a string into substrings separated by a specified character. It uses a constant string `separator` which consisting of exactly one character.
Splits a string into substrings separated by a specified character. It uses a constant string `separator` which consists of exactly one character.
Returns an array of selected substrings. Empty substrings may be selected if the separator occurs at the beginning or end of the string, or if there are multiple consecutive separators.

**Syntax**

``` sql
splitByChar(separator, s)
splitByChar(separator, s[, max_substrings])
```

**Arguments**

- `separator` — The separator which should contain exactly one character. [String](../../sql-reference/data-types/string.md).
- `s` — The string to split. [String](../../sql-reference/data-types/string.md).
- `max_substrings` — An optional `Int64` defaulting to 0. When `max_substrings` > 0, the returned substrings will be no more than `max_substrings`, otherwise the function will return as many substrings as possible.

**Returned value(s)**

@ -44,20 +45,22 @@ SELECT splitByChar(',', '1,2,3,abcde');
└─────────────────────────────────┘
```
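
A short sketch of the new `max_substrings` argument (only the element count is asserted here, since the handling of the remainder may depend on the server version):

``` sql
-- At most two substrings are returned:
SELECT splitByChar(',', '1,2,3,abcde', 2) AS parts;
```
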
## splitByString(separator, s)
## splitByString(separator, s[, max_substrings])

Splits a string into substrings separated by a string. It uses a constant string `separator` of multiple characters as the separator. If the string `separator` is empty, it will split the string `s` into an array of single characters.

**Syntax**

``` sql
splitByString(separator, s)
splitByString(separator, s[, max_substrings])
```

**Arguments**

- `separator` — The separator. [String](../../sql-reference/data-types/string.md).
- `s` — The string to split. [String](../../sql-reference/data-types/string.md).
- `max_substrings` — An optional `Int64` defaulting to 0. When `max_substrings` > 0, the returned substrings will be no more than `max_substrings`, otherwise the function will return as many substrings as possible.


**Returned value(s)**

@ -91,20 +94,22 @@ SELECT splitByString('', 'abcde');
└────────────────────────────┘
```
## splitByRegexp(regexp, s)
## splitByRegexp(regexp, s[, max_substrings])

Splits a string into substrings separated by a regular expression. It uses a regular expression string `regexp` as the separator. If the `regexp` is empty, it will split the string `s` into an array of single characters. If no match is found for this regular expression, the string `s` won't be split.

**Syntax**

``` sql
splitByRegexp(regexp, s)
splitByRegexp(regexp, s[, max_substrings])
```

**Arguments**

- `regexp` — Regular expression. Constant. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
- `s` — The string to split. [String](../../sql-reference/data-types/string.md).
- `max_substrings` — An optional `Int64` defaulting to 0. When `max_substrings` > 0, the returned substrings will be no more than `max_substrings`, otherwise the function will return as many substrings as possible.


**Returned value(s)**

@ -146,7 +151,7 @@ Result:
└────────────────────────────┘
```
## splitByWhitespace(s)
## splitByWhitespace(s[, max_substrings])

Splits a string into substrings separated by whitespace characters.
Returns an array of selected substrings.

@ -154,12 +159,14 @@ Returns an array of selected substrings.
**Syntax**

``` sql
splitByWhitespace(s)
splitByWhitespace(s[, max_substrings])
```

**Arguments**

- `s` — The string to split. [String](../../sql-reference/data-types/string.md).
- `max_substrings` — An optional `Int64` defaulting to 0. When `max_substrings` > 0, the returned substrings will be no more than `max_substrings`, otherwise the function will return as many substrings as possible.


**Returned value(s)**

@ -179,7 +186,7 @@ SELECT splitByWhitespace('  1!  a,  b.  ');
└─────────────────────────────────────┘
```
## splitByNonAlpha(s)
## splitByNonAlpha(s[, max_substrings])

Splits a string into substrings separated by whitespace and punctuation characters.
Returns an array of selected substrings.

@ -187,12 +194,14 @@ Returns an array of selected substrings.
**Syntax**

``` sql
splitByNonAlpha(s)
splitByNonAlpha(s[, max_substrings])
```

**Arguments**

- `s` — The string to split. [String](../../sql-reference/data-types/string.md).
- `max_substrings` — An optional `Int64` defaulting to 0. When `max_substrings` > 0, the returned substrings will be no more than `max_substrings`, otherwise the function will return as many substrings as possible.


**Returned value(s)**

@ -217,10 +226,28 @@ SELECT splitByNonAlpha('  1!  a,  b.  ');
Concatenates string representations of values listed in the array with the separator. `separator` is an optional parameter: a constant string, set to an empty string by default.
Returns the string.
## alphaTokens(s)
## alphaTokens(s[, max_substrings]), splitByAlpha(s[, max_substrings])

Selects substrings of consecutive bytes from the ranges a-z and A-Z. Returns an array of substrings.

**Syntax**

``` sql
alphaTokens(s[, max_substrings])
splitByAlpha(s[, max_substrings])
```

**Arguments**

- `s` — The string to split. [String](../../sql-reference/data-types/string.md).
- `max_substrings` — An optional `Int64` defaulting to 0. When `max_substrings` > 0, the returned substrings will be no more than `max_substrings`, otherwise the function will return as many substrings as possible.

**Returned value(s)**

Returns an array of selected substrings.

Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

**Example**

``` sql
@ -131,7 +131,7 @@ Type: `UInt32`.
### regionToPopulation(id\[, geobase\])

Gets the population for a region.
The population can be recorded in files with the geobase. See the section “External dictionaries”.
The population can be recorded in files with the geobase. See the section “Dictionaries”.
If the population is not recorded for the region, it returns 0.
In the geobase, the population might be recorded for child regions, but not for parent regions.
@ -254,7 +254,7 @@ The `ALTER` query lets you create and delete separate elements (columns) in nest

There is no support for deleting columns in the primary key or the sampling key (columns that are used in the `ENGINE` expression). Changing the type for columns that are included in the primary key is only possible if this change does not cause the data to be modified (for example, you are allowed to add values to an Enum or to change a type from `DateTime` to `UInt32`).

If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](/docs/en/sql-reference/statements/insert-into.md/#insert_query_insert-select) query, then switch the tables using the [RENAME](/docs/en/sql-reference/statements/rename.md/#rename-table) query and delete the old table. You can use the [clickhouse-copier](/docs/en/operations/utilities/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query.
If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](/docs/en/sql-reference/statements/insert-into.md/#inserting-the-results-of-select) query, then switch the tables using the [RENAME](/docs/en/sql-reference/statements/rename.md/#rename-table) query and delete the old table. You can use the [clickhouse-copier](/docs/en/operations/utilities/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query.

The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running.
@ -10,7 +10,7 @@ sidebar_label: DELETE
ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE WHERE filter_expr
```

Deletes data matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations).
Deletes data matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).


:::note
@ -25,6 +25,6 @@ The synchronicity of the query processing is defined by the [mutations_sync](/do

**See also**

- [Mutations](/docs/en/sql-reference/statements/alter/index.md/#mutations)
- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/index.md/#synchronicity-of-alter-queries)
- [Mutations](/docs/en/sql-reference/statements/alter/index.md#mutations)
- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries)
- [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting
@ -270,7 +270,7 @@ ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd'

## UPDATE IN PARTITION

Manipulates data in the specified partition matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations).
Manipulates data in the specified partition matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

Syntax:

@ -290,7 +290,7 @@ ALTER TABLE mt UPDATE x = x + 1 IN PARTITION 2 WHERE p = 2;

## DELETE IN PARTITION

Deletes data in the specified partition matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations).
Deletes data in the specified partition matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

Syntax:
@ -2,9 +2,134 @@
slug: /en/sql-reference/statements/alter/projection
sidebar_position: 49
sidebar_label: PROJECTION
title: "Manipulating Projections"
title: "Projections"
---

Projections store data in a format that optimizes query execution; this feature is useful for:
- Running queries on a column that is not a part of the primary key
- Pre-aggregating columns, which reduces both computation and IO

You can define one or more projections for a table, and during the query analysis the projection with the least data to scan will be selected by ClickHouse without modifying the query provided by the user.

## Example filtering without using primary keys

Creating the table:
```
CREATE TABLE visits_order
(
   `user_id` UInt64,
   `user_name` String,
   `pages_visited` Nullable(Float64),
   `user_agent` String
)
ENGINE = MergeTree()
PRIMARY KEY user_agent
```
Using `ALTER TABLE`, we could add the Projection to an existing table:
```
ALTER TABLE visits_order ADD PROJECTION user_name_projection (
SELECT
*
ORDER BY user_name
)

ALTER TABLE visits_order MATERIALIZE PROJECTION user_name_projection
```
Inserting the data:
```
INSERT INTO visits_order SELECT
    number,
    'test',
    1.5 * (number / 2),
    'Android'
FROM numbers(1, 100);
```

The projection allows us to filter by `user_name` quickly, even though `user_name` was not defined as a `PRIMARY_KEY` in the original table.
At query time, ClickHouse determines that less data will be processed if the projection is used, as the data is ordered by `user_name`.
```
SELECT
    *
FROM visits_order
WHERE user_name='test'
LIMIT 2
```

To verify that a query is using the projection, we could review the `system.query_log` table. On the `projections` field we have the name of the projection used or empty if none has been used:
```
SELECT query, projections FROM system.query_log WHERE query_id='<query_id>'
```
## Example pre-aggregation query

Creating the table with the Projection:
```
CREATE TABLE visits
(
   `user_id` UInt64,
   `user_name` String,
   `pages_visited` Nullable(Float64),
   `user_agent` String,
   PROJECTION projection_visits_by_user
   (
       SELECT
           user_agent,
           sum(pages_visited)
       GROUP BY user_id, user_agent
   )
)
ENGINE = MergeTree()
ORDER BY user_agent
```
Inserting the data:
```
INSERT INTO visits SELECT
    number,
    'test',
    1.5 * (number / 2),
    'Android'
FROM numbers(1, 100);
```
```
INSERT INTO visits SELECT
    number,
    'test',
    1. * (number / 2),
    'IOS'
FROM numbers(100, 500);
```
We will execute a first query using `GROUP BY` on the field `user_agent`; this query will not use the projection defined, as the pre-aggregation does not match.
```
SELECT
    user_agent,
    count(DISTINCT user_id)
FROM visits
GROUP BY user_agent
```

To use the projection we could execute queries that select part of, or all of the pre-aggregation and `GROUP BY` fields.
```
SELECT
    user_agent
FROM visits
WHERE user_id > 50 AND user_id < 150
GROUP BY user_agent
```
```
SELECT
    user_agent,
    sum(pages_visited)
FROM visits
GROUP BY user_agent
```

As mentioned before, we could review the `system.query_log` table. On the `projections` field we have the name of the projection used or empty if none has been used:
```
SELECT query, projections FROM system.query_log WHERE query_id='<query_id>'
```
# Manipulating Projections

The following operations with [projections](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#projections) are available:

## ADD PROJECTION
@ -13,15 +138,15 @@ The following operations with [projections](/docs/en/engines/table-engines/merge

## DROP PROJECTION

`ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations).
`ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

## MATERIALIZE PROJECTION

`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations).
`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

## CLEAR PROJECTION

`ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations).
`ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).


The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only change metadata or remove files.

@ -14,7 +14,7 @@ The following operations are available:

- `ALTER TABLE [db].table_name [ON CLUSTER cluster] DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk.

- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.

The first two commands are lightweight in a sense that they only change metadata or remove files.
@ -10,7 +10,7 @@ sidebar_label: UPDATE
ALTER TABLE [db.]table [ON CLUSTER cluster] UPDATE column1 = expr1 [, ...] WHERE filter_expr
```

Manipulates data matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations).
Manipulates data matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

:::note
The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use.
@ -24,7 +24,7 @@ The synchronicity of the query processing is defined by the [mutations_sync](/do

**See also**

- [Mutations](/docs/en/sql-reference/statements/alter/index.md/#mutations)
- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/index.md/#synchronicity-of-alter-queries)
- [Mutations](/docs/en/sql-reference/statements/alter/index.md#mutations)
- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries)
- [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting
@ -5,9 +5,9 @@ sidebar_label: DICTIONARY
|
||||
title: "CREATE DICTIONARY"
|
||||
---
|
||||
|
||||
Creates a new [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
|
||||
Creates a new [dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
|
||||
|
||||
**Syntax**
|
||||
## Syntax
|
||||
|
||||
``` sql
|
||||
CREATE [OR REPLACE] DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
|
||||
@ -25,17 +25,21 @@ SETTINGS(setting_name = setting_value, setting_name = setting_value, ...)
|
||||
COMMENT 'Comment'
|
||||
```
|
||||
|
||||
External dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type, all other properties may have default values.
|
||||
The dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type, all other properties may have default values.
|
||||
|
||||
`ON CLUSTER` clause allows creating dictionary on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
|
||||
|
||||
Depending on dictionary [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) one or more attributes can be specified as dictionary keys.
|
||||
|
||||
For more information, see [External Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.
|
||||
## SOURCE
|
||||
|
||||
You can add a comment to the dictionary when you creating it using `COMMENT` clause.
|
||||
The source for a dictionary can be a:
|
||||
- table in the current ClickHouse service
|
||||
- table in a remote ClickHouse service
|
||||
- file available by HTTP(S)
|
||||
- another database
|
||||
|
||||
**Example**
### Create a dictionary from a table in the current ClickHouse service

Input table `source_table`:

@ -49,51 +53,81 @@ Input table `source_table`:

Creating the dictionary:

``` sql
CREATE DICTIONARY dictionary_with_comment
CREATE DICTIONARY id_value_dictionary
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
SOURCE(CLICKHOUSE(TABLE 'source_table'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
COMMENT 'The temporary dictionary';
```

Output the dictionary:

``` sql
SHOW CREATE DICTIONARY dictionary_with_comment;
SHOW CREATE DICTIONARY id_value_dictionary;
```

```text
┌─statement────────────────────────────────────────────────────────────────────────────────┐
│ CREATE DICTIONARY default.dictionary_with_comment
```response
CREATE DICTIONARY default.id_value_dictionary
(
    `id` UInt64,
    `value` String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
SOURCE(CLICKHOUSE(TABLE 'source_table'))
LIFETIME(MIN 0 MAX 1000)
LAYOUT(FLAT())
COMMENT 'The temporary dictionary' │
└───────────────────────────────────────────────────────────────────────────────────────────┘
```

Output the comment to dictionary:
### Create a dictionary from a table in a remote ClickHouse service

Input table (in the remote ClickHouse service) `source_table`:

``` text
┌─id─┬─value──┐
│  1 │ First  │
│  2 │ Second │
└────┴────────┘
```

Creating the dictionary:

``` sql
SELECT comment FROM system.dictionaries WHERE name == 'dictionary_with_comment' AND database == currentDatabase();
CREATE DICTIONARY id_value_dictionary
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'HOSTNAME' PORT 9000 USER 'default' PASSWORD 'PASSWORD' TABLE 'source_table' DB 'default'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
```

```text
┌─comment──────────────────┐
│ The temporary dictionary │
└──────────────────────────┘
### Create a dictionary from a file available by HTTP(S)

```sql
statement: CREATE DICTIONARY default.taxi_zone_dictionary
(
    `LocationID` UInt16 DEFAULT 0,
    `Borough` String,
    `Zone` String,
    `service_zone` String
)
PRIMARY KEY LocationID
SOURCE(HTTP(URL 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/taxi_zone_lookup.csv' FORMAT 'CSVWithNames'))
LIFETIME(MIN 0 MAX 0)
LAYOUT(HASHED())
```

### Create a dictionary from another database

Please see the details in [Dictionary sources](/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md/#dbms).
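
As a hedged illustration of that case, a dictionary backed by a MySQL table might look as follows (the host, credentials, and table names are placeholders; the `replica`-based `MYSQL` source clause follows the form described in Dictionary sources):

``` sql
CREATE DICTIONARY mysql_backed_dictionary
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(MYSQL(
    port 3306
    user 'default'
    password 'PASSWORD'
    replica(host 'mysql-host.example' priority 1)  -- placeholder host
    db 'db_name'
    table 'source_table'
))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
```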

**See Also**

- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — This table contains information about [external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
- For more information, see the [Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.
- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — This table contains information about [Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).

@ -282,7 +282,7 @@ Each time a query is run with the same `JOIN`, the subquery is run again because

In some cases, it is more efficient to use [IN](../../../sql-reference/operators/in.md) instead of `JOIN`.

If you need a `JOIN` for joining with dimension tables (relatively small tables that contain dimension properties, such as names for advertising campaigns), a `JOIN` might not be very convenient because the right table is re-accessed for every query. For such cases, there is an “external dictionaries” feature that you should use instead of `JOIN`. For more information, see the [External dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.
If you need a `JOIN` for joining with dimension tables (relatively small tables that contain dimension properties, such as names for advertising campaigns), a `JOIN` might not be very convenient because the right table is re-accessed for every query. For such cases, there is a “dictionaries” feature that you should use instead of `JOIN`. For more information, see the [Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.
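
A hedged sketch of the dictionary-based rewrite (the table `hits`, the dictionary `campaigns_dict`, and its attribute `name` are hypothetical): instead of re-reading the right-hand table on every query, a per-key dictionary lookup is performed:

``` sql
-- Hypothetical names: a fact table `hits` and a dictionary `campaigns_dict`
-- holding a String attribute `name`, keyed by campaign id.
SELECT
    campaign_id,
    dictGet('campaigns_dict', 'name', campaign_id) AS campaign_name
FROM hits;
```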

### Memory Limitations

@ -41,7 +41,7 @@ Purge default roles from a user:
SET DEFAULT ROLE NONE TO user
```

Set all the granted roles as default excepting some of them:
Set all the granted roles as default except for specific roles `role1` and `role2`:

``` sql
SET DEFAULT ROLE ALL EXCEPT role1, role2 TO user

@ -198,7 +198,7 @@ Result:

## SHOW DICTIONARIES

Displays a list of [external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
Displays a list of [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).

``` sql
SHOW DICTIONARIES [FROM <db>] [LIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
@ -110,5 +110,5 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123');

**See Also**

- [The ‘MySQL’ table engine](../../engines/table-engines/integrations/mysql.md)
- [Using MySQL as a source of external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)
- [Using MySQL as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)

@ -101,5 +101,5 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test')

## See Also

- [ODBC external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
- [ODBC dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
- [ODBC table engine](../../engines/table-engines/integrations/odbc.md).

@ -130,6 +130,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)

**See Also**

- [The PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md)
- [Using PostgreSQL as a source of external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
- [Using PostgreSQL as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)

[Original article](https://clickhouse.com/docs/en/sql-reference/table-functions/postgresql/) <!--hide-->

@ -24,7 +24,7 @@ slug: /ru/operations/settings/

- When starting the ClickHouse console client in non-interactive mode, set the startup parameter `--setting=value`.
- When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).
- Specify the required settings in the [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select) section of a SELECT query. These settings apply only within that query and are reset to the previous or default value after it is executed.
- Specify the required settings in the [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select-query) section of a SELECT query. These settings apply only within that query and are reset to the previous or default value after it is executed (see the sketch below).
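
A minimal sketch of the last option (the table name is hypothetical; `max_threads` is just one example of a query-level setting):

``` sql
SELECT count()
FROM hits
SETTINGS max_threads = 4;  -- applies to this query only, then resets
```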

Settings that can only be set in the server configuration file are not covered in this section.

@ -479,7 +479,7 @@ SELECT * FROM table_with_enum_column_for_tsv_insert;

Enables or disables inserting [default values](../../sql-reference/statements/create/table.md#create-default-values) instead of [NULL](../../sql-reference/syntax.md#null-literal) into columns that do not allow [storing NULL](../../sql-reference/data-types/nullable.md#data_type-nullable).
If a column does not allow storing `NULL` and this setting is disabled, inserting `NULL` raises an exception. If a column allows storing `NULL`, `NULL` values are inserted regardless of this setting.

This setting applies to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select) queries. The `SELECT` subqueries may be combined with `UNION ALL`.
This setting applies to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md#inserting-the-results-of-select) queries. The `SELECT` subqueries may be combined with `UNION ALL`.
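
The setting is not named in this excerpt; the behaviour described matches `insert_null_as_default`. Assuming that, a minimal sketch with a hypothetical table:

``` sql
-- Hypothetical table: a non-Nullable column with a default.
CREATE TABLE dest (x UInt32 DEFAULT 42) ENGINE = MergeTree ORDER BY x;
-- With the setting enabled, the NULL produced by the SELECT is replaced
-- by the column default (42); with it disabled, this INSERT would throw.
INSERT INTO dest SELECT NULL::Nullable(UInt32)
SETTINGS insert_null_as_default = 1;
```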

Possible values:

@ -7,8 +7,8 @@ slug: /ru/operations/system-tables/crash-log

Columns:

- `event_date` ([Datetime](../../sql-reference/data-types/datetime.md)) — Event date.
- `event_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Event time.
- `event_date` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Event timestamp with nanoseconds.
- `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — Number of the signal received by the thread.
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread ID.

@ -15,7 +15,7 @@ slug: /ru/operations/system-tables/mutations

- `command` ([String](../../sql-reference/data-types/string.md)) — The mutation command (the part of the query after `ALTER TABLE [db.]table`).

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation was created.
- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation was created.

- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partition IDs (one entry per partition). For mutations of non-replicated tables, the array is empty.

@ -39,7 +39,7 @@ slug: /ru/operations/system-tables/mutations

- `latest_failed_part` ([String](../../sql-reference/data-types/string.md)) — Name of the most recent part whose mutation failed.

- `latest_fail_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time of the most recent mutation failure.
- `latest_fail_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time of the most recent mutation failure.

- `latest_fail_reason` ([String](../../sql-reference/data-types/string.md)) — Reason for the most recent mutation failure.

@ -29,7 +29,7 @@ slug: /ru/operations/system-tables/replication_queue

- `MUTATE_PART` — Apply one or more mutations to a part.
- `ALTER_METADATA` — Apply table structure changes resulting from `ALTER` queries.

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.

- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Number of replicas waiting for the task to complete, with confirmation of completion. This column is only relevant for the `GET_PARTS` task.

@ -47,13 +47,13 @@ slug: /ru/operations/system-tables/replication_queue

- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the message about the last error that occurred, if any.

- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time of the last attempt to execute the task.
- `last_attempt_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time of the last attempt to execute the task.

- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Number of postponed tasks.

- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — Reason why the task was postponed.

- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.
- `last_postpone_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.

- `merge_type` ([String](../../sql-reference/data-types/string.md)) — Type of the current merge. Empty if it is a mutation.

@ -6,7 +6,7 @@ sidebar_label: Date32

# Date32 {#data_type-datetime32}

A date. Supports the same date range as [Datetime64](../../sql-reference/data-types/datetime64.md). The value is stored in four bytes and corresponds to the number of days from 1900-01-01 to 2299-12-31.
A date. Supports the same date range as [DateTime64](../../sql-reference/data-types/datetime64.md). The value is stored in four bytes and corresponds to the number of days from 1900-01-01 to 2299-12-31.

**Example**

@ -602,7 +602,7 @@ date_trunc(unit, value[, timezone])

- Date and time, truncated to the specified part.

Type: [Datetime](../../sql-reference/data-types/datetime.md).
Type: [DateTime](../../sql-reference/data-types/datetime.md).

**Examples**

@ -913,7 +913,7 @@ now([timezone])

- The current date and time.

Type: [Datetime](../../sql-reference/data-types/datetime.md).
Type: [DateTime](../../sql-reference/data-types/datetime.md).

**Example**

@ -254,7 +254,7 @@ SELECT groupArray(x), groupArray(s) FROM tmp;

Columns that are part of the primary key or the sampling key (in general, columns used in the `ENGINE` expression) cannot be deleted. Changing the type of a column that is part of the primary key is only possible if the change does not alter the data (for example, adding a value to an Enum, or changing the type from `DateTime` to `UInt32`, is allowed).

If the capabilities of the `ALTER` query are not enough for the table change you need, you can create a new table, copy the data into it using an [INSERT SELECT](../insert-into.md#insert_query_insert-select) query, swap the tables using a [RENAME](../rename.md#rename-table) query, and drop the old table. As an alternative to `INSERT SELECT`, you can use the [clickhouse-copier](../../../sql-reference/statements/alter/index.md) tool.
If the capabilities of the `ALTER` query are not enough for the table change you need, you can create a new table, copy the data into it using an [INSERT SELECT](../insert-into.md#inserting-the-results-of-select) query, swap the tables using a [RENAME](../rename.md#rename-table) query, and drop the old table. As an alternative to `INSERT SELECT`, you can use the [clickhouse-copier](../../../sql-reference/statements/alter/index.md) tool.
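
A hedged sketch of that workaround, with hypothetical table and column names:

``` sql
-- New table with the structure that ALTER could not produce in place:
CREATE TABLE visits_new (CounterID UInt32, EventDate Date, UserAgent String)
ENGINE = MergeTree ORDER BY (CounterID, EventDate);

-- Copy the data, converting as needed:
INSERT INTO visits_new SELECT CounterID, EventDate, toString(UserAgent) FROM visits;

-- Swap the tables and drop the old one:
RENAME TABLE visits TO visits_old, visits_new TO visits;
DROP TABLE visits_old;
```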

The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running when the `ALTER` query is issued, the `ALTER` query first waits for it to complete, and during that time all new queries to the same table wait for this `ALTER` to finish.

@ -95,7 +95,7 @@ INSERT INTO t FORMAT TabSeparated

If the table has [constraints](../../sql-reference/statements/create/table.md#constraints), they are checked for every inserted row. If any constraint is violated for at least one row, the query is stopped.

### Inserting the results of `SELECT` {#insert_query_insert-select}
### Inserting the results of `SELECT` {#inserting-the-results-of-select}

**Syntax**

@ -270,7 +270,7 @@ SELECT * REPLACE(i + 1 AS i) EXCEPT (j) APPLY(sum) from columns_transformers;
└─────────────────┴────────┘
```

## SETTINGS in the SELECT query {#settings-in-select}
## SETTINGS in the SELECT query {#settings-in-select-query}

You can set the values of the required settings directly in the `SELECT` query, in the `SETTINGS` section. These settings apply only within that query and are reset to the previous or default value after it is executed.

@ -67,7 +67,7 @@ ClickHouse提供各种各样在允许牺牲数据精度的情况下对查询进
2. Run approximate queries based on a partial sample of the data. In this case, only a proportionally small share of the data is retrieved from disk.
3. Aggregate over a limited random selection of keys instead of all of them. When the key distribution in the data meets certain conditions, this provides reasonably accurate results while using fewer computing resources.

## Adaptive Join Algorithm {#adaptive-join-algorithm}

ClickHouse supports [JOIN](../sql-reference/statements/select/join.md) across multiple tables; it prefers the hash join algorithm and switches to the merge join algorithm when there are several large tables.

@ -7,8 +7,8 @@ slug: /zh/operations/system-tables/crash-log

Columns:

- `event_date` ([Datetime](../../sql-reference/data-types/datetime.md)) — Event date.
- `event_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Event time.
- `event_date` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Event timestamp in nanoseconds.
- `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — Signal number.
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread ID.

@ -15,7 +15,7 @@ slug: /zh/operations/system-tables/mutations

- `command` ([String](../../sql-reference/data-types/string.md)) — The mutation command string (the part of the query after `ALTER TABLE [db.]table`).

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.
- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.

- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partition IDs (one record per partition). For mutations of non-replicated tables, the array is empty.

@ -39,7 +39,7 @@ slug: /zh/operations/system-tables/mutations

- `latest_failed_part` ([String](../../sql-reference/data-types/string.md)) — Name of the most recent part that could not be mutated.

- `latest_fail_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Time of the most recent mutation failure.
- `latest_fail_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the most recent mutation failure.

- `latest_fail_reason` ([String](../../sql-reference/data-types/string.md)) — Exception message that caused the most recent mutation failure.

@ -29,7 +29,7 @@ slug: /zh/operations/system-tables/replication_queue

- `MUTATE_PART` — Apply one or more mutations to a part.
- `ALTER_METADATA` — Apply alter modifications according to the global /metadata and /columns paths.

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.

- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Number of replicas waiting for the task to complete and confirm completion. This column is only relevant for `GET_PARTS` tasks.

@ -47,13 +47,13 @@ slug: /zh/operations/system-tables/replication_queue

- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Message text of the last error that occurred, if any.

- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time of the last attempt to execute the task.
- `last_attempt_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time of the last attempt to execute the task.

- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Number of postponed tasks.

- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — Reason why the task was postponed.

- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.
- `last_postpone_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.

- `merge_type` ([String](../../sql-reference/data-types/string.md)) — Type of the current merge. Empty if it is a mutation.

@ -152,7 +152,7 @@ sidebar_label: "ANSI\u517C\u5BB9\u6027"

| F051-02 | TIME data type (with support for a literal form), with fractional seconds precision of at least 0 | No {.text-danger} | |
| F051-03 | TIMESTAMP data type (with support for a literal form), with fractional seconds precision of at least 0 and 6 | Yes {.text-danger} | |
| F051-04 | Comparison predicates on DATE, TIME, and TIMESTAMP data types | Yes {.text-success} | |
| F051-05 | Explicit CAST between the Datetime type and a string-form datetime value | Yes {.text-success} | |
| F051-05 | Explicit CAST between the DateTime type and a string-form datetime value | Yes {.text-success} | |
| F051-06 | CURRENT_DATE | No {.text-danger} | Use `today()` instead |
| F051-07 | LOCALTIME | No {.text-danger} | Use `now()` instead |
| F051-08 | LOCALTIMESTAMP | No {.text-danger} | |

@ -6,7 +6,7 @@ sidebar_position: 49
sidebar_label: DateTime64
---

# Datetime64 {#data_type-datetime64}
# DateTime64 {#data_type-datetime64}

This type allows storing an instant in time as a date plus a time of day, with defined sub-second precision.

@ -539,7 +539,7 @@ date_trunc(unit, value[, timezone])

- The DateTime value rounded down to the specified unit.

Type: [Datetime](../../sql-reference/data-types/datetime.md).
Type: [DateTime](../../sql-reference/data-types/datetime.md).

**Example**

@ -850,7 +850,7 @@ now([timezone])

- The current date and time.

Type: [Datetime](../../sql-reference/data-types/datetime.md).
Type: [DateTime](../../sql-reference/data-types/datetime.md).

**Example**

@ -181,7 +181,7 @@ unhex(arg)

**Arguments**

- `arg` — A string containing any number of hexadecimal digits. Type: [String](../../sql-reference/data-types/string.md).
- `arg` — A string containing any number of hexadecimal digits. Type: [String](../../sql-reference/data-types/string.md), [FixedString](../../sql-reference/data-types/fixedstring.md).

Both upper- and lower-case letters A–F are supported. The number of hexadecimal digits does not have to be even; if it is odd, the last digit is interpreted as the low-order half of a 00–0F byte. If the argument string contains anything other than hexadecimal digits, some implementation-defined result is returned (no exception is thrown). For numeric arguments, unhex() does not perform the inverse of hex(N).
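
A couple of hedged usage sketches (the first value is the standard hex/unhex docs example; the second illustrates the odd-digit-count case):

``` sql
SELECT unhex('4D7953514C');  -- the binary string 'MySQL'
SELECT reinterpretAsUInt64(reverse(unhex('FFF'))) AS num;  -- odd digit count: 4095
```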

@ -150,7 +150,7 @@ ALTER TABLE visits MODIFY COLUMN browser Array(String)

Deleting columns that are part of the primary key or the sampling key (columns used in the `ENGINE` expression) is not supported. Changing the type of a column included in the primary key is only possible if the change does not alter the data (for example, adding a value to an Enum, or changing `DateTime` to `UInt32`).

If the `ALTER` operation is not sufficient for the table change you need, you can create a new table, copy the data into it with [INSERT SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select), rename the new table to the original name with [RENAME](../../sql-reference/statements/misc.md#misc_operations-rename), and delete the old table. You can use [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) in place of `INSERT SELECT`.
If the `ALTER` operation is not sufficient for the table change you need, you can create a new table, copy the data into it with [INSERT SELECT](../../sql-reference/statements/insert-into.md#inserting-the-results-of-select), rename the new table to the original name with [RENAME](../../sql-reference/statements/misc.md#misc_operations-rename), and delete the old table. You can use [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) in place of `INSERT SELECT`.

The `ALTER` operation blocks all reads and writes to the table. In other words, if a long `SELECT` is running when the `ALTER` is issued, the `ALTER` waits for that `SELECT` to finish, and new statements against the table wait while the `ALTER` is running.

@ -90,7 +90,7 @@ INSERT INTO t FORMAT TabSeparated

If the table has [constraints](../../sql-reference/statements/create/table.mdx#constraints), the data is validated row by row at insert time; if any row violates a constraint, the server throws an exception with the constraint information and the statement stops.

### Inserting the results of `SELECT` {#insert_query_insert-select}
### Inserting the results of `SELECT` {#inserting-the-results-of-select}

``` sql
INSERT INTO [db.]table [(c1, c2, c3)] SELECT ...
@ -243,6 +243,7 @@ try
    registerAggregateFunctions();

    processConfig();
    initTtyBuffer(toProgressOption(config().getString("progress", "default")));

    /// Includes delayed_interactive.
    if (is_interactive)
@ -1088,8 +1089,6 @@ void Client::processConfig()
    }
    else
    {
        std::string progress = config().getString("progress", "tty");
        need_render_progress = (Poco::icompare(progress, "off") && Poco::icompare(progress, "no") && Poco::icompare(progress, "false") && Poco::icompare(progress, "0"));
        echo_queries = config().getBool("echo", false);
        ignore_error = config().getBool("ignore-error", false);

|
||||
++pos;
|
||||
}
|
||||
|
||||
indent = std::max(255U, num_spaces);
|
||||
indent = std::min(255U, num_spaces);
|
||||
line.assign(pos, end);
|
||||
|
||||
if (pos == end)
|
||||
|
@ -149,19 +149,7 @@ std::string getUserName(uid_t user_id)
Poco::Net::SocketAddress Keeper::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const
{
    auto address = makeSocketAddress(host, port, &logger());
#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100
    if (secure)
        /// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl
        /// https://github.com/pocoproject/poco/pull/2257
        socket.bind(address, /* reuseAddress = */ true);
    else
#endif
#if POCO_VERSION < 0x01080000
    socket.bind(address, /* reuseAddress = */ true);
#else
    socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false));
#endif

    socket.listen(/* backlog = */ config().getUInt("listen_backlog", 64));

    return address;
@ -37,6 +37,7 @@
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <TableFunctions/registerTableFunctions.h>
#include <Storages/registerStorages.h>
#include <Storages/NamedCollections.h>
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Formats/registerFormats.h>
@ -118,6 +119,8 @@ void LocalServer::initialize(Poco::Util::Application & self)
        config().getUInt("max_io_thread_pool_size", 100),
        config().getUInt("max_io_thread_pool_free_size", 0),
        config().getUInt("io_thread_pool_queue_size", 10000));

    NamedCollectionFactory::instance().initialize(config());
}

@ -414,6 +417,8 @@ try
    registerFormats();

    processConfig();
    initTtyBuffer(toProgressOption(config().getString("progress", "default")));

    applyCmdSettings(global_context);

    if (is_interactive)
@ -489,8 +494,6 @@ void LocalServer::processConfig()
    }
    else
    {
        std::string progress = config().getString("progress", "tty");
        need_render_progress = (Poco::icompare(progress, "off") && Poco::icompare(progress, "no") && Poco::icompare(progress, "false") && Poco::icompare(progress, "0"));
        echo_queries = config().hasOption("echo") || config().hasOption("verbose");
        ignore_error = config().getBool("ignore-error", false);
        is_multiquery = true;
@ -123,7 +123,7 @@ void MetricsTransmitter::transmit(std::vector<ProfileEvents::Count> & prev_count
{
    for (const auto & name_value : async_metrics_values)
    {
        key_vals.emplace_back(asynchronous_metrics_path_prefix + name_value.first, name_value.second);
        key_vals.emplace_back(asynchronous_metrics_path_prefix + name_value.first, name_value.second.value);
    }
}

@ -60,6 +60,7 @@
#include <Storages/System/attachInformationSchemaTables.h>
#include <Storages/Cache/ExternalDataSourceCache.h>
#include <Storages/Cache/registerRemoteFileMetadatas.h>
#include <Storages/NamedCollections.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsLoader.h>
#include <Functions/registerFunctions.h>
@ -341,19 +342,7 @@ Poco::Net::SocketAddress Server::socketBindListen(
    [[maybe_unused]] bool secure) const
{
    auto address = makeSocketAddress(host, port, &logger());
#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100
    if (secure)
        /// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl
        /// https://github.com/pocoproject/poco/pull/2257
        socket.bind(address, /* reuseAddress = */ true);
    else
#endif
#if POCO_VERSION < 0x01080000
    socket.bind(address, /* reuseAddress = */ true);
#else
    socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config.getBool("listen_reuse_port", false));
#endif

    /// If caller requests any available port from the OS, discover it after binding.
    if (port == 0)
    {
@ -732,6 +721,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
        config().getUInt("max_io_thread_pool_free_size", 0),
        config().getUInt("io_thread_pool_queue_size", 10000));

    NamedCollectionFactory::instance().initialize(config());

    /// Initialize global local cache for remote filesystem.
    if (config().has("local_cache_for_remote_fs"))
    {
@ -805,41 +796,43 @@ int Server::main(const std::vector<std::string> & /*args*/)
    /// that are interpreted (not executed) but can alter the behaviour of the program as well.

    /// Please keep the below log messages in-sync with the ones in daemon/BaseDaemon.cpp

    String calculated_binary_hash = getHashOfLoadedBinaryHex();

    if (stored_binary_hash.empty())
    {
        LOG_WARNING(log, "Integrity check of the executable skipped because the reference checksum could not be read."
            " (calculated checksum: {})", calculated_binary_hash);
    }
    else if (calculated_binary_hash == stored_binary_hash)
    {
        LOG_INFO(log, "Integrity check of the executable successfully passed (checksum: {})", calculated_binary_hash);
        LOG_WARNING(log, "Integrity check of the executable skipped because the reference checksum could not be read.");
    }
    else
    {
        /// If program is run under debugger, ptrace will fail.
        if (ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == -1)
        String calculated_binary_hash = getHashOfLoadedBinaryHex();
        if (calculated_binary_hash == stored_binary_hash)
        {
            /// Program is run under debugger. Modification of it's binary image is ok for breakpoints.
            global_context->addWarningMessage(
                fmt::format("Server is run under debugger and its binary image is modified (most likely with breakpoints).",
                    calculated_binary_hash)
            );
            LOG_INFO(log, "Integrity check of the executable successfully passed (checksum: {})", calculated_binary_hash);
        }
        else
        {
            throw Exception(ErrorCodes::CORRUPTED_DATA,
                "Calculated checksum of the executable ({0}) does not correspond"
                " to the reference checksum stored in the executable ({1})."
                " This may indicate one of the following:"
                " - the executable {2} was changed just after startup;"
                " - the executable {2} was corrupted on disk due to faulty hardware;"
                " - the loaded executable was corrupted in memory due to faulty hardware;"
                " - the file {2} was intentionally modified;"
                " - a logical error in the code."
                , calculated_binary_hash, stored_binary_hash, executable_path);
            /// If program is run under debugger, ptrace will fail.
            if (ptrace(PTRACE_TRACEME, 0, nullptr, nullptr) == -1)
            {
                /// Program is run under debugger. Modification of it's binary image is ok for breakpoints.
                global_context->addWarningMessage(fmt::format(
                    "Server is run under debugger and its binary image is modified (most likely with breakpoints).",
                    calculated_binary_hash));
            }
            else
            {
                throw Exception(
                    ErrorCodes::CORRUPTED_DATA,
                    "Calculated checksum of the executable ({0}) does not correspond"
                    " to the reference checksum stored in the executable ({1})."
                    " This may indicate one of the following:"
                    " - the executable {2} was changed just after startup;"
                    " - the executable {2} was corrupted on disk due to faulty hardware;"
                    " - the loaded executable was corrupted in memory due to faulty hardware;"
                    " - the file {2} was intentionally modified;"
                    " - a logical error in the code.",
                    calculated_binary_hash,
                    stored_binary_hash,
                    executable_path);
            }
        }
    }
}
@ -1279,6 +1272,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
#if USE_SSL
    CertificateReloader::instance().tryLoad(*config);
#endif
    NamedCollectionFactory::instance().reload(*config);
    ProfileEvents::increment(ProfileEvents::MainConfigLoads);

    /// Must be the last.
@ -1486,11 +1480,6 @@ int Server::main(const std::vector<std::string> & /*args*/)
#endif

    SCOPE_EXIT({
        /// Stop reloading of the main config. This must be done before `global_context->shutdown()` because
        /// otherwise the reloading may pass a changed config to some destroyed parts of ContextSharedPart.
        main_config_reloader.reset();
        access_control.stopPeriodicReloading();

        async_metrics.stop();

        /** Ask to cancel background jobs all table engines,
@ -1789,10 +1778,17 @@ int Server::main(const std::vector<std::string> & /*args*/)

    SCOPE_EXIT_SAFE({
        LOG_DEBUG(log, "Received termination signal.");
        LOG_DEBUG(log, "Waiting for current connections to close.");

        /// Stop reloading of the main config. This must be done before everything else because it
        /// can try to access/modify already deleted objects.
        /// E.g. it can recreate new servers or it may pass a changed config to some destroyed parts of ContextSharedPart.
        main_config_reloader.reset();
        access_control.stopPeriodicReloading();

        is_cancelled = true;

        LOG_DEBUG(log, "Waiting for current connections to close.");

        size_t current_connections = 0;
        {
            std::lock_guard lock(servers_lock);
@ -152,7 +152,7 @@
    filter: contrast(125%);
}

#add {
#add, #reload {
    font-weight: bold;
    user-select: none;
    cursor: pointer;
@ -166,7 +166,7 @@
    margin-bottom: 1rem;
}

#add:hover {
#add:hover, #reload:hover {
    background: var(--button-background-color);
}

@ -286,6 +286,7 @@
<input spellcheck="false" id="password" type="password" placeholder="password" />
</div>
<div>
<input id="reload" type="button" value="Reload" style="display: none;">
<input id="add" type="button" value="Add chart">
<span class="nowrap themes"><span id="toggle-dark">🌚</span><span id="toggle-light">🌞</span></span>
<div id="chart-params"></div>
@ -672,15 +673,39 @@ function insertChart(i) {
    chart.addEventListener('mouseleave', e => { edit_buttons.style.display = 'none'; });

    charts.appendChild(chart);
    return {chart: chart, textarea: query_editor_textarea};
};

document.getElementById('add').addEventListener('click', e => {
    queries.push({ title: '', query: '' });
    insertChart(plots.length);

    const {chart, textarea} = insertChart(plots.length);
    chart.scrollIntoView();
    textarea.focus();

    plots.push(null);
    resize();
});

document.getElementById('reload').addEventListener('click', e => {
    reloadAll();
});

function showReloadIfNeeded() {
    const is_any_field_changed = (host != document.getElementById('url').value
        || user != document.getElementById('user').value
        || password != document.getElementById('password').value);
    if (is_any_field_changed) {
        document.getElementById('reload').style.display = '';
    } else {
        document.getElementById('reload').style.display = 'none';
    }
}

document.getElementById('password').addEventListener('input', e => { showReloadIfNeeded(); })
document.getElementById('user').addEventListener('input', e => { showReloadIfNeeded(); })
document.getElementById('url').addEventListener('input', e => { showReloadIfNeeded(); })

function legendAsTooltipPlugin({ className, style = { background: "var(--legend-background)" } } = {}) {
    let legendEl;

@ -731,6 +756,8 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend-
    };
}

let add_http_cors_header = false;

async function draw(idx, chart, url_params, query) {
    if (plots[idx]) {
        plots[idx].destroy();
@ -742,6 +769,12 @@ async function draw(idx, chart, url_params, query) {
    password = document.getElementById('password').value;

    let url = `${host}?default_format=JSONCompactColumns`

    if (add_http_cors_header) {
        // For debug purposes, you may set add_http_cors_header from a browser console
        url += '&add_http_cors_header=1';
    }

    if (user) {
        url += `&user=${encodeURIComponent(user)}`;
    }
@ -843,10 +876,15 @@ function resize() {

new ResizeObserver(resize).observe(document.body);

document.getElementById('params').onsubmit = function(event) {
function reloadAll() {
    updateParams();
    drawAll();
    saveState();
    document.getElementById('reload').style.display = 'none';
}

document.getElementById('params').onsubmit = function(event) {
    reloadAll();
    event.preventDefault();
}
@ -130,6 +130,7 @@ enum class AccessType
    M(SHOW_ROW_POLICIES, "SHOW POLICIES, SHOW CREATE ROW POLICY, SHOW CREATE POLICY", TABLE, SHOW_ACCESS) \
    M(SHOW_QUOTAS, "SHOW CREATE QUOTA", GLOBAL, SHOW_ACCESS) \
    M(SHOW_SETTINGS_PROFILES, "SHOW PROFILES, SHOW CREATE SETTINGS PROFILE, SHOW CREATE PROFILE", GLOBAL, SHOW_ACCESS) \
    M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", GLOBAL, SHOW_ACCESS) \
    M(SHOW_ACCESS, "", GROUP, ACCESS_MANAGEMENT) \
    M(ACCESS_MANAGEMENT, "", GROUP, ALL) \
    \
Some files were not shown because too many files have changed in this diff.