Merge branch 'master' into input_format_json_empty_as_default

commit 34d02304d3
Kruglov Pavel, 2024-08-21 14:50:36 +02:00, committed by GitHub
1048 changed files with 34933 additions and 9101 deletions

.github/actions/debug/action.yml (new file)

@@ -0,0 +1,18 @@
name: DebugInfo
description: Prints workflow debug info

runs:
  using: "composite"
  steps:
    - name: Print envs
      shell: bash
      run: |
        echo "::group::Envs"
        env
        echo "::endgroup::"
    - name: Print Event.json
      shell: bash
      run: |
        echo "::group::Event.json"
        python3 -m json.tool "$GITHUB_EVENT_PATH"
        echo "::endgroup::"

.github/workflows/auto_releases.yml (new file)

@@ -0,0 +1,109 @@
name: AutoReleases

env:
  PYTHONUNBUFFERED: 1

concurrency:
  group: autoreleases

on:
  # schedule:
  #   - cron: '0 9 * * *'
  workflow_dispatch:
    inputs:
      dry-run:
        description: 'Dry run'
        required: false
        default: true
        type: boolean

jobs:
  AutoReleaseInfo:
    runs-on: [self-hosted, style-checker-aarch64]
    outputs:
      data: ${{ steps.info.outputs.AUTO_RELEASE_PARAMS }}
      dry_run: ${{ steps.info.outputs.DRY_RUN }}
    steps:
      - name: Debug Info
        uses: ./.github/actions/debug
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          EOF
          echo "DRY_RUN=true" >> "$GITHUB_ENV"
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
      - name: Prepare Info
        id: info
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --prepare
          echo "::group::Auto Release Info"
          python3 -m json.tool /tmp/autorelease_info.json
          echo "::endgroup::"
          {
            echo 'AUTO_RELEASE_PARAMS<<EOF'
            cat /tmp/autorelease_info.json
            echo 'EOF'
          } >> "$GITHUB_ENV"
          {
            echo 'AUTO_RELEASE_PARAMS<<EOF'
            cat /tmp/autorelease_info.json
            echo 'EOF'
          } >> "$GITHUB_OUTPUT"
          echo "DRY_RUN=true" >> "$GITHUB_OUTPUT"
      - name: Post Release Branch statuses
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --post-status
      - name: Clean up
        uses: ./.github/actions/clean

  Release_0:
    needs: AutoReleaseInfo
    name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].release_branch }}
    if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].ready }}
    uses: ./.github/workflows/create_release.yml
    with:
      ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].commit_sha }}
      type: patch
      dry-run: ${{ needs.AutoReleaseInfo.outputs.dry_run }}
  #
  # Release_1:
  #   needs: [AutoReleaseInfo, Release_0]
  #   name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].release_branch }}
  #   if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].ready }}
  #   uses: ./.github/workflows/create_release.yml
  #   with:
  #     ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].commit_sha }}
  #     type: patch
  #     dry-run: ${{ env.DRY_RUN }}
  #
  # Release_2:
  #   needs: [AutoReleaseInfo, Release_1]
  #   name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[2].release_branch }}
  #   if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[2].ready }}
  #   uses: ./.github/workflow/create_release.yml
  #   with:
  #     ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].commit_sha }}
  #     type: patch
  #     dry-run: ${{ env.DRY_RUN }}
  #
  # Release_3:
  #   needs: [AutoReleaseInfo, Release_2]
  #   name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].release_branch }}
  #   if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].ready }}
  #   uses: ./.github/workflow/create_release.yml
  #   with:
  #     ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].commit_sha }}
  #     type: patch
  #     dry-run: ${{ env.DRY_RUN }}

  # - name: Post Slack Message
  #   if: ${{ !cancelled() }}
  #   run: |
  #     cd "$GITHUB_WORKSPACE/tests/ci"
  #     python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}


@@ -2,6 +2,7 @@ name: CreateRelease
 
 concurrency:
   group: release
 'on':
   workflow_dispatch:
     inputs:
@@ -26,6 +27,26 @@ concurrency:
         required: false
         default: false
         type: boolean
+  workflow_call:
+    inputs:
+      ref:
+        description: 'Git reference (branch or commit sha) from which to create the release'
+        required: true
+        type: string
+      type:
+        description: 'The type of release: "new" for a new release or "patch" for a patch release'
+        required: true
+        type: string
+      only-repo:
+        description: 'Run only repos updates including docker (repo-recovery, tests)'
+        required: false
+        default: false
+        type: boolean
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: false
+        type: boolean
 
 jobs:
   CreateRelease:
@@ -101,6 +122,7 @@ jobs:
             --volume=".:/wd" --workdir="/wd" \
             clickhouse/style-test \
             ./tests/ci/changelog.py -v --debug-helpers \
+            --gh-user-or-token ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} \
             --jobs=5 \
             --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
           git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
@@ -129,9 +151,9 @@ jobs:
         if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
         shell: bash
         run: |
+          python3 ./tests/ci/create_release.py --set-progress-completed
           git reset --hard HEAD
           git checkout "$GITHUB_REF_NAME"
-          python3 ./tests/ci/create_release.py --set-progress-completed
       - name: Create GH Release
         if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
         shell: bash


@@ -482,7 +482,7 @@ jobs:
         if: ${{ !failure() }}
         run: |
           # update overall ci report
-          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+          python3 ./tests/ci/finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
       - name: Check Workflow results
         if: ${{ !cancelled() }}
         run: |
@@ -490,5 +490,4 @@
           cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
           ${{ toJson(needs) }}
           EOF
           python3 ./tests/ci/ci_buddy.py --check-wf-status

.gitmodules

@@ -108,7 +108,7 @@
     url = https://github.com/ClickHouse/icudata
 [submodule "contrib/icu"]
     path = contrib/icu
-    url = https://github.com/unicode-org/icu
+    url = https://github.com/ClickHouse/icu
 [submodule "contrib/flatbuffers"]
     path = contrib/flatbuffers
     url = https://github.com/ClickHouse/flatbuffers
@@ -345,9 +345,6 @@
 [submodule "contrib/FP16"]
     path = contrib/FP16
     url = https://github.com/Maratyszcza/FP16.git
-[submodule "contrib/robin-map"]
-    path = contrib/robin-map
-    url = https://github.com/Tessil/robin-map.git
 [submodule "contrib/aklomp-base64"]
     path = contrib/aklomp-base64
     url = https://github.com/aklomp/base64.git


@@ -1,4 +1,5 @@
### Table of Contents
**[ClickHouse release v24.8 LTS, 2024-08-20](#248)**<br/>
**[ClickHouse release v24.7, 2024-07-30](#247)**<br/>
**[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
**[ClickHouse release v24.5, 2024-05-30](#245)**<br/>

@@ -10,6 +11,153 @@
# 2024 Changelog

### <a id="248"></a> ClickHouse release 24.8 LTS, 2024-08-20
#### Backward Incompatible Change
* `clickhouse-client` and `clickhouse-local` now default to multi-query mode (instead of single-query mode). As an example, `clickhouse-client -q "SELECT 1; SELECT 2"` now works, whereas users previously had to add `--multiquery` (or `-n`). The `--multiquery/-n` switch became obsolete. INSERT queries in multi-query statements are treated specially based on their FORMAT clause: If the FORMAT is `VALUES` (the most common case), the end of the INSERT statement is represented by a trailing semicolon `;` at the end of the query. For all other FORMATs (e.g. `CSV` or `JSONEachRow`), the end of the INSERT statement is represented by two newlines `\n\n` at the end of the query. [#63898](https://github.com/ClickHouse/ClickHouse/pull/63898) ([FFish](https://github.com/wxybear)).
* In previous versions, it was possible to use an alternative syntax for `LowCardinality` data types by appending `WithDictionary` to the name of the data type. It was an initial working implementation, and it was never documented or exposed to the public. Now, it is deprecated. If you have used this syntax, you have to ALTER your tables and rename the data types to `LowCardinality`. [#66842](https://github.com/ClickHouse/ClickHouse/pull/66842) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix logical errors with storage `Buffer` used with distributed destination table. It's a backward incompatible change: queries using `Buffer` with a distributed destination table may stop working if the table appears more than once in the query (e.g., in a self-join). [#67015](https://github.com/ClickHouse/ClickHouse/pull/67015) ([vdimir](https://github.com/vdimir)).
* In previous versions, calling functions for random distributions based on the Gamma function (such as Chi-Squared, Student, Fisher) with negative arguments close to zero led to a long computation or an infinite loop. In the new version, calling these functions with zero or negative arguments will produce an exception. This closes [#67297](https://github.com/ClickHouse/ClickHouse/issues/67297). [#67326](https://github.com/ClickHouse/ClickHouse/pull/67326) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The system table `text_log` is enabled by default. This is fully compatible with previous versions, but you may notice subtly increased disk usage on the local disk (this system table takes a tiny amount of disk space). [#67428](https://github.com/ClickHouse/ClickHouse/pull/67428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, `arrayWithConstant` can be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `REPLACE` modifier formatting (forbid omitting brackets); see the short example at the end of this section. [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement the `Dynamic` type. Now, when the limit of dynamic data types is reached, new types are not cast to String but stored in a special data structure in binary format with a binary-encoded data type. Any type ever inserted into a `Dynamic` column can now be read from it as a subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).
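A minimal illustration of the `REPLACE` modifier formatting change above (the table and column names here are made up):

```sql
-- Brackets around the replacement expression are required; the formatter may no longer omit them.
SELECT * REPLACE (value + 1 AS value) FROM tbl;
```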
#### New Feature
* Added a new `MergeTree` setting `deduplicate_merge_projection_mode` to control the projections during merges (for specific engines) and `OPTIMIZE DEDUPLICATE` query. Supported options: `throw` (throw an exception in case the projection is not fully supported for *MergeTree engine), `drop` (remove projection during merge if it can't be merged itself consistently) and `rebuild` (rebuild projection from scratch, which is a heavy operation). [#66672](https://github.com/ClickHouse/ClickHouse/pull/66672) ([jsc0218](https://github.com/jsc0218)).
* Add `_etag` virtual column for S3 table engine. Fixes [#65312](https://github.com/ClickHouse/ClickHouse/issues/65312). [#65386](https://github.com/ClickHouse/ClickHouse/pull/65386) ([skyoct](https://github.com/skyoct)).
* Added a tagging (namespace) mechanism for the query cache. The same queries with different tags are considered different by the query cache. Example: `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'abc'` and `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'def'` now create different query cache entries. [#68235](https://github.com/ClickHouse/ClickHouse/pull/68235) ([sakulali](https://github.com/sakulali)).
* Support more variants of JOIN strictness (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequality conditions which involve columns from both left and right table. e.g. `t1.y < t2.y` (see the setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)).
* Interpret Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. The follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add function `printf` for Spark compatibility (but you can also use the existing `format` function); see the short example at the end of this section. [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)).
* Added a new server setting, `disable_insertion_and_mutation`. If it is enabled, the server will deny all insertions and mutations. This includes asynchronous INSERTs. This setting can be used to create read-only replicas. [#66519](https://github.com/ClickHouse/ClickHouse/pull/66519) ([Xu Jia](https://github.com/XuJia0210)).
* Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table functions with the `Null` engine, which can be useful for testing. It should work for RESTORE and explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)).
* Added support for reading `MULTILINESTRING` geometry in `WKT` format using function `readWKTLineString`. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)).
* Add a new table function `fuzzQuery`. This function allows the modification of a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1') LIMIT 5;`. [#67655](https://github.com/ClickHouse/ClickHouse/pull/67655) ([pufit](https://github.com/pufit)).
* Add a query `ALTER TABLE ... DROP DETACHED PARTITION ALL` to drop all detached partitions. [#67885](https://github.com/ClickHouse/ClickHouse/pull/67885) ([Duc Canh Le](https://github.com/canhld94)).
* Add the `rows_before_aggregation_at_least` statistic to the query response when a new setting, `rows_before_aggregation` is enabled. This statistic represents the number of rows read before aggregation. In the context of a distributed query, when using the `group by` or `max` aggregation function without a `limit`, `rows_before_aggregation_at_least` can reflect the number of rows hit by the query. [#66084](https://github.com/ClickHouse/ClickHouse/pull/66084) ([morning-color](https://github.com/morning-color)).
* Support `OPTIMIZE` query on `Join` tables to reduce their memory footprint. [#67883](https://github.com/ClickHouse/ClickHouse/pull/67883) ([Duc Canh Le](https://github.com/canhld94)).
* Allow running a query instantly in the Play UI by adding `&run=1` to the URL. [#66457](https://github.com/ClickHouse/ClickHouse/pull/66457) ([Aleksandr Musorin](https://github.com/AVMusorin)).
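A short sketch of the new `printf` function mentioned above. The format string and values are illustrative only, assuming C-style specifiers as in Spark:

```sql
-- %s, %d and %.2f are assumed to behave like their C counterparts
SELECT printf('%s has %d parts, pi is roughly %.2f', 'tbl', 3, 3.14159) AS msg;
```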
#### Experimental Feature
* Implement a new `JSON` data type; see the short example at the end of this section. [#66444](https://github.com/ClickHouse/ClickHouse/pull/66444) ([Kruglov Pavel](https://github.com/Avogar)).
* Add the new `TimeSeries` table engine. [#64183](https://github.com/ClickHouse/ClickHouse/pull/64183) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add new experimental `Kafka` storage engine to store offsets in Keeper instead of relying on committing them to Kafka. It makes the commit to ClickHouse tables atomic with regard to consumption from the queue. [#57625](https://github.com/ClickHouse/ClickHouse/pull/57625) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Use adaptive read task size calculation method (adaptive meaning it depends on read column sizes) for parallel replicas. [#60377](https://github.com/ClickHouse/ClickHouse/pull/60377) ([Nikita Taranov](https://github.com/nickitat)).
* Added statistics type `count_min` (count-min sketches) which provides selectivity estimations for equality predicates like `col = 'val'`. Supported data types are string, date, datetime and numeric types. [#65521](https://github.com/ClickHouse/ClickHouse/pull/65521) ([JackyWoo](https://github.com/JackyWoo)).
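A minimal sketch of the experimental `JSON` type from the entry above. The gating setting name and the dotted subcolumn access are assumptions, not confirmed by this changelog:

```sql
SET allow_experimental_json_type = 1;  -- assumed name of the gating setting

CREATE TABLE events (data JSON) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO events VALUES ('{"user": {"id": 42}, "tags": ["a", "b"]}');

-- JSON paths are expected to be readable as subcolumns (syntax assumed)
SELECT data.user.id, data.tags FROM events;
```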
#### Performance Improvement
* Setting `optimize_functions_to_subcolumns` is enabled by default; see the short example at the end of this section. [#68053](https://github.com/ClickHouse/ClickHouse/pull/68053) ([Anton Popov](https://github.com/CurtizJ)).
* Store the `plain_rewritable` disk directory metadata in `__meta` layout, separately from the merge tree data in the object storage. Move the `plain_rewritable` disk to a flat directory structure. [#65751](https://github.com/ClickHouse/ClickHouse/pull/65751) ([Julia Kartseva](https://github.com/jkartseva)).
* Improve columns squashing (an operation happening in INSERT queries) for `String`/`Array`/`Map`/`Variant`/`Dynamic` types by reserving required memory in advance for all subcolumns. [#67043](https://github.com/ClickHouse/ClickHouse/pull/67043) ([Kruglov Pavel](https://github.com/Avogar)).
* Speed up `SYSTEM FLUSH LOGS` and flush logs on shutdown. [#67472](https://github.com/ClickHouse/ClickHouse/pull/67472) ([Sema Checherinda](https://github.com/CheSema)).
* Improved overall performance of merges by reducing the overhead of the scheduling steps of merges. [#68016](https://github.com/ClickHouse/ClickHouse/pull/68016) ([Anton Popov](https://github.com/CurtizJ)).
* Speed up table removal for the `DROP DATABASE` query; the default value for `database_catalog_drop_table_concurrency` was increased to 16. [#67228](https://github.com/ClickHouse/ClickHouse/pull/67228) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Avoid allocating too much capacity for array columns while writing ORC; this speeds up writing an Array column by about 15%. [#67879](https://github.com/ClickHouse/ClickHouse/pull/67879) ([李扬](https://github.com/taiyang-li)).
* Speed up mutations for non-replicated MergeTree significantly [#66911](https://github.com/ClickHouse/ClickHouse/pull/66911) [#66909](https://github.com/ClickHouse/ClickHouse/pull/66909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
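A rough sketch of what the `optimize_functions_to_subcolumns` setting (now enabled by default, see above) is assumed to do; the exact set of rewrites is not spelled out in this changelog:

```sql
-- With optimize_functions_to_subcolumns = 1, a query such as
SELECT length(arr) FROM t;
-- is assumed to be answered from the `arr.size0` subcolumn instead of reading the whole array column.
```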
#### Improvement
* Setting `allow_experimental_analyzer` is renamed to `enable_analyzer`. The old name is preserved in a form of an alias. This signifies that Analyzer is no longer in beta and is fully promoted to production. [#66438](https://github.com/ClickHouse/ClickHouse/pull/66438) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Improve schema inference of date-times. Now DateTime64 is used only when the date-time has a fractional part, otherwise regular DateTime is used. Inference of Date/DateTime is stricter now, especially with `date_time_input_format='best_effort'`, to avoid inferring date-times from strings in corner cases. [#68382](https://github.com/ClickHouse/ClickHouse/pull/68382) ([Kruglov Pavel](https://github.com/Avogar)).
* ClickHouse server now supports a new setting, `max_keep_alive_requests`. For keep-alive HTTP connections it works in tandem with `keep_alive_timeout`: even if the idle timeout has not expired, the server closes a connection once more than `max_keep_alive_requests` requests have been served through it. [#61793](https://github.com/ClickHouse/ClickHouse/pull/61793) ([Nikita Taranov](https://github.com/nickitat)).
* Various improvements in the advanced dashboard. This closes [#67697](https://github.com/ClickHouse/ClickHouse/issues/67697). This closes [#63407](https://github.com/ClickHouse/ClickHouse/issues/63407). This closes [#51129](https://github.com/ClickHouse/ClickHouse/issues/51129). This closes [#61204](https://github.com/ClickHouse/ClickHouse/issues/61204). [#67701](https://github.com/ClickHouse/ClickHouse/pull/67701) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not require a grant for REMOTE when creating a Distributed table: a grant for the Distributed engine is enough. [#65419](https://github.com/ClickHouse/ClickHouse/pull/65419) ([jsc0218](https://github.com/jsc0218)).
* Do not pass logs for keeper explicitly in the Docker image to allow overriding. [#65564](https://github.com/ClickHouse/ClickHouse/pull/65564) ([Azat Khuzhin](https://github.com/azat)).
* Introduced `use_same_password_for_base_backup` settings for `BACKUP` and `RESTORE` queries, allowing to create and restore incremental backups to/from password protected archives. [#66214](https://github.com/ClickHouse/ClickHouse/pull/66214) ([Samuele](https://github.com/sguerrini97)).
* Ignore `async_load_databases` for `ATTACH` query (previously it was possible for ATTACH to return before the tables had been attached). [#66240](https://github.com/ClickHouse/ClickHouse/pull/66240) ([Azat Khuzhin](https://github.com/azat)).
* Added logs and metrics for rejected connections (where there are not enough resources). [#66410](https://github.com/ClickHouse/ClickHouse/pull/66410) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support proper `UUID` type for MongoDB engine. [#66671](https://github.com/ClickHouse/ClickHouse/pull/66671) ([Azat Khuzhin](https://github.com/azat)).
* Add replication lag and recovery time metrics. [#66703](https://github.com/ClickHouse/ClickHouse/pull/66703) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Add `DiskS3NoSuchKeyErrors` metric. [#66704](https://github.com/ClickHouse/ClickHouse/pull/66704) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Ensure the `COMMENT` clause works for all table engines. [#66832](https://github.com/ClickHouse/ClickHouse/pull/66832) ([Joe Lynch](https://github.com/joelynch)).
* Function `mapFromArrays` now accepts `Map(K, V)` as first argument, for example: `SELECT mapFromArrays(map('a', 4, 'b', 4), ['aa', 'bb'])` now works and returns `{('a',4):'aa',('b',4):'bb'}`. Also, if the 1st argument is an Array, it can now also be of type `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as long as the actual array values are not `NULL`. [#67103](https://github.com/ClickHouse/ClickHouse/pull/67103) ([李扬](https://github.com/taiyang-li)).
* Read configuration for `clickhouse-local` from `~/.clickhouse-local`. [#67135](https://github.com/ClickHouse/ClickHouse/pull/67135) ([Azat Khuzhin](https://github.com/azat)).
* Rename setting `input_format_orc_read_use_writer_time_zone` to `input_format_orc_reader_timezone` and allow the user to set the reader timezone. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)).
* Decrease the log level of the `Socket is not connected` error when an HTTP connection is immediately reset by the peer after connecting. Closes [#34218](https://github.com/ClickHouse/ClickHouse/issues/34218). [#67177](https://github.com/ClickHouse/ClickHouse/pull/67177) ([vdimir](https://github.com/vdimir)).
* Add ability to load dashboards for `system.dashboards` from config (once set, they override the default dashboards preset). [#67232](https://github.com/ClickHouse/ClickHouse/pull/67232) ([Azat Khuzhin](https://github.com/azat)).
* The window functions in SQL are traditionally in snake case. ClickHouse uses `camelCase`, so new aliases `denseRank()` and `percentRank()` have been created. These new functions can be called exactly the same way as the original `dense_rank()` and `percent_rank()` functions; both snake_case and camelCase syntaxes remain usable. A new test for each of the functions has been added as well. See the short example at the end of this section. This closes [#67042](https://github.com/ClickHouse/ClickHouse/issues/67042). [#67334](https://github.com/ClickHouse/ClickHouse/pull/67334) ([Peter Nguyen](https://github.com/petern48)).
* Autodetect the configuration file format if it is not `.xml`, `.yml` or `.yaml`. If the file begins with `<`, it might be XML, otherwise it might be YAML. This is useful when providing a configuration file from a pipe: `clickhouse-server --config-file <(echo "hello: world")`. [#67391](https://github.com/ClickHouse/ClickHouse/pull/67391) ([sakulali](https://github.com/sakulali)).
* Functions `formatDateTime` and `formatDateTimeInJodaSyntax` now treat their format parameter as optional. If it is not specified, format strings `%Y-%m-%d %H:%i:%s` and `yyyy-MM-dd HH:mm:ss` are assumed. Example: `SELECT parseDateTime('2021-01-04 23:12:34')` now returns DateTime value `2021-01-04 23:12:34` (previously, this threw an exception). [#67399](https://github.com/ClickHouse/ClickHouse/pull/67399) ([Robert Schulze](https://github.com/rschu1ze)).
* Automatically retry Keeper requests in KeeperMap if they happen because of timeout or connection loss. [#67448](https://github.com/ClickHouse/ClickHouse/pull/67448) ([Antonio Andelic](https://github.com/antonio2368)).
* Add `-no-pie` to Aarch64 Linux builds to allow proper introspection and symbolizing of stacktraces after a ClickHouse restart. [#67916](https://github.com/ClickHouse/ClickHouse/pull/67916) ([filimonov](https://github.com/filimonov)).
* Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)).
* Fix settings and `current_database` in `system.processes` for async BACKUP/RESTORE. [#68163](https://github.com/ClickHouse/ClickHouse/pull/68163) ([Azat Khuzhin](https://github.com/azat)).
* Remove unnecessary logs for non-replicated `MergeTree`. [#68238](https://github.com/ClickHouse/ClickHouse/pull/68238) ([Daniil Ivanik](https://github.com/divanik)).
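A minimal example of the camelCase window-function aliases mentioned above:

```sql
-- denseRank()/percentRank() are aliases of dense_rank()/percent_rank(), so the usual OVER clause applies
SELECT
    number,
    denseRank()   OVER (ORDER BY number % 3) AS dr,
    percentRank() OVER (ORDER BY number % 3) AS pr
FROM numbers(6);
```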
#### Build/Testing/Packaging Improvement
* Integration tests flaky check will not run each test case multiple times to find more issues in tests and make them more reliable. It is using the `pytest-repeat` library to run a test case multiple times for the same environment. It is important to clean up tables and other entities at the end of a test case to pass. Repeating works much faster than several pytest runs as it starts the necessary containers only once. [#66986](https://github.com/ClickHouse/ClickHouse/pull/66986) ([Ilya Yatsishin](https://github.com/qoega)).
* Unblock the usage of CLion with ClickHouse. In previous versions, CLion froze for a minute on every keypress. This closes [#66994](https://github.com/ClickHouse/ClickHouse/issues/66994). [#66995](https://github.com/ClickHouse/ClickHouse/pull/66995) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* getauxval: avoid a crash under a sanitizer re-exec due to high ASLR entropy in newer Linux kernels. [#67081](https://github.com/ClickHouse/ClickHouse/pull/67081) ([Raúl Marín](https://github.com/Algunenano)).
* Some parts of client code are extracted to a single file and highest possible level optimization is applied to them even for debug builds. This closes: [#65745](https://github.com/ClickHouse/ClickHouse/issues/65745). [#67215](https://github.com/ClickHouse/ClickHouse/pull/67215) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Bug Fix
* Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Fix crash of `uniq` and `uniqTheta` with a `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)).
* Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Fixed reading of subcolumns after `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)).
* Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Fix formatting of query with aliased JOIN ON expression, e.g. `... JOIN t2 ON (x = y) AS e ORDER BY x` should be formatted as `... JOIN t2 ON ((x = y) AS e) ORDER BY x`. [#66312](https://github.com/ClickHouse/ClickHouse/pull/66312) ([vdimir](https://github.com/vdimir)).
* Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible runtime error while converting Array field with nulls to Array(Variant). [#66727](https://github.com/ClickHouse/ClickHouse/pull/66727) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Fix creating KeeperMap table after an incomplete drop. [#66865](https://github.com/ClickHouse/ClickHouse/pull/66865) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix broken part error while restoring to a `s3_plain_rewritable` disk. [#66881](https://github.com/ClickHouse/ClickHouse/pull/66881) ([Vitaly Baranov](https://github.com/vitlibar)).
* In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Fix invalid format detection in schema inference that could lead to logical error Format {} doesn't support schema inference. [#66899](https://github.com/ClickHouse/ClickHouse/pull/66899) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible deadlock on query cancel with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)).
* Forbid `CREATE AS SELECT` even when `database_replicated_allow_heavy_create` is set. It was unconditionally forbidden in 23.12 and accidentally allowed under the setting in unreleased 24.7. [#66980](https://github.com/ClickHouse/ClickHouse/pull/66980) ([vdimir](https://github.com/vdimir)).
* Reading from the `numbers` could wrongly throw an exception when the `max_rows_to_read` limit was set. This closes [#66992](https://github.com/ClickHouse/ClickHouse/issues/66992). [#66996](https://github.com/ClickHouse/ClickHouse/pull/66996) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add proper type conversion to lagInFrame and leadInFrame window functions - fixes msan test. [#67091](https://github.com/ClickHouse/ClickHouse/pull/67091) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Use a separate client context in `clickhouse-local`. [#67133](https://github.com/ClickHouse/ClickHouse/pull/67133) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from a `Merge` table over a `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Correct behavior of `ORDER BY all` with disabled `enable_order_by_all` and parallel replicas (distributed queries as well). [#67153](https://github.com/ClickHouse/ClickHouse/pull/67153) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix wrong usage of input_format_max_bytes_to_read_for_schema_inference in schema cache. [#67157](https://github.com/ClickHouse/ClickHouse/pull/67157) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a memory leak for count distinct when an exception is thrown during `GROUP BY` on a single nullable key. [#67171](https://github.com/ClickHouse/ClickHouse/pull/67171) ([Jet He](https://github.com/compasses)).
* Fix an error in optimization which converts OUTER JOIN to INNER JOIN. This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix error `Conversion from AggregateFunction(name, Type) to AggregateFunction(name, Nullable(Type)) is not supported`. The bug was caused by the `optimize_rewrite_aggregate_function_with_if` optimization. Fixes [#67112](https://github.com/ClickHouse/ClickHouse/issues/67112). [#67229](https://github.com/ClickHouse/ClickHouse/pull/67229) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix hung query when using empty tuple as lhs of function IN. [#67295](https://github.com/ClickHouse/ClickHouse/pull/67295) ([Duc Canh Le](https://github.com/canhld94)).
* It was possible to create a very deep nested JSON data that triggered stack overflow while skipping unknown fields. This closes [#67292](https://github.com/ClickHouse/ClickHouse/issues/67292). [#67324](https://github.com/ClickHouse/ClickHouse/pull/67324) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix attaching ReplicatedMergeTree table after exception during startup. [#67360](https://github.com/ClickHouse/ClickHouse/pull/67360) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix segfault caused by incorrectly detaching from thread group in `Aggregator`. [#67385](https://github.com/ClickHouse/ClickHouse/pull/67385) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix one more case when a non-deterministic function is specified in PK. [#67395](https://github.com/ClickHouse/ClickHouse/pull/67395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed `bloom_filter` index breaking queries with mildly weird conditions like `(k=2)=(k=2)` or `has([1,2,3], k)`. [#67423](https://github.com/ClickHouse/ClickHouse/pull/67423) ([Michael Kolupaev](https://github.com/al13n321)).
* Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix wait for tasks in ~WriteBufferFromS3 in case WriteBuffer was cancelled. [#67459](https://github.com/ClickHouse/ClickHouse/pull/67459) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Protect temporary part directories from removing during RESTORE. [#67491](https://github.com/ClickHouse/ClickHouse/pull/67491) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `Logical error: Expected the argument №N of type T to have X rows, but it has 0`. The error could happen in a remote query with constant expression in `GROUP BY` (with a new analyzer). [#67536](https://github.com/ClickHouse/ClickHouse/pull/67536) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix join on tuple with NULLs: Some queries with the new analyzer and `NULL` inside the tuple in the `JOIN ON` section returned incorrect results. [#67538](https://github.com/ClickHouse/ClickHouse/pull/67538) ([vdimir](https://github.com/vdimir)).
* Fix redundant reschedule of FileCache::freeSpaceRatioKeepingThreadFunc() in case of full non-evictable cache. [#67540](https://github.com/ClickHouse/ClickHouse/pull/67540) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix for function `toStartOfWeek` which returned the wrong result with a small `DateTime64` value. [#67558](https://github.com/ClickHouse/ClickHouse/pull/67558) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix creation of view with recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix `Logical error: 'file_offset_of_buffer_end <= read_until_position'` in filesystem cache. Closes [#57508](https://github.com/ClickHouse/ClickHouse/issues/57508). [#67623](https://github.com/ClickHouse/ClickHouse/pull/67623) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixes [#62282](https://github.com/ClickHouse/ClickHouse/issues/62282). Removed the call to `convertFieldToString()` and added datatype specific serialization code. Parameterized view substitution was broken for multiple datatypes when parameter value was a function or expression returning datatype instance. [#67654](https://github.com/ClickHouse/ClickHouse/pull/67654) ([Shankar](https://github.com/shiyer7474)).
* Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is now taken into account, so window functions without a window frame definition in SQL are assigned to the proper `WindowTransformer`s. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
* Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible logical error "Unexpected return type from if" with the experimental Variant type and enabled setting `use_variant_as_common_type` in function `if` with Tuples and Maps. [#67687](https://github.com/ClickHouse/ClickHouse/pull/67687) ([Kruglov Pavel](https://github.com/Avogar)).
* Due to a bug in the Linux kernel, a query could hang in `TimerDescriptor::drain`. This closes [#37686](https://github.com/ClickHouse/ClickHouse/issues/37686). [#67702](https://github.com/ClickHouse/ClickHouse/pull/67702) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix completion of `RESTORE ON CLUSTER` command. [#67720](https://github.com/ClickHouse/ClickHouse/pull/67720) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix dictionary hang in case of CANNOT_SCHEDULE_TASK while loading. [#67751](https://github.com/ClickHouse/ClickHouse/pull/67751) ([Azat Khuzhin](https://github.com/azat)).
* Queries like `SELECT count() FROM t WHERE cast(c = 1 or c = 9999 AS Bool) SETTINGS use_skip_indexes=1` with bloom filter indexes on `c` now work correctly. [#67781](https://github.com/ClickHouse/ClickHouse/pull/67781) ([jsc0218](https://github.com/jsc0218)).
* Fix wrong aggregation result in some queries with aggregation without keys and filter, close [#67419](https://github.com/ClickHouse/ClickHouse/issues/67419). [#67804](https://github.com/ClickHouse/ClickHouse/pull/67804) ([vdimir](https://github.com/vdimir)).
* Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix DateTime64 parsing after constant folding in distributed queries, close [#66773](https://github.com/ClickHouse/ClickHouse/issues/66773). [#67920](https://github.com/ClickHouse/ClickHouse/pull/67920) ([vdimir](https://github.com/vdimir)).
* Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Now ClickHouse doesn't consider part as broken if projection doesn't exist on disk but exists in `checksums.txt`. [#68003](https://github.com/ClickHouse/ClickHouse/pull/68003) ([alesapin](https://github.com/alesapin)).
* Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Attempt to fix `Block structure mismatch in AggregatingStep stream: different types` for aggregate projection optimization. [#68107](https://github.com/ClickHouse/ClickHouse/pull/68107) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
### <a id="247"></a> ClickHouse release 24.7, 2024-07-30
#### Backward Incompatible Change


@@ -322,17 +322,21 @@ if (DISABLE_OMIT_FRAME_POINTER)
     set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer")
 endif()
 
+# Before you start hating your debugger because it refuses to show variables ('<optimized out>'), try building with -DDEBUG_O_LEVEL="0"
+# https://stackoverflow.com/questions/63386189/whats-the-difference-between-a-compilers-o0-option-and-og-option/63386263#63386263
+set(DEBUG_O_LEVEL "g" CACHE STRING "The -Ox level used for debug builds")
+
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
 set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
-set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
+set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
 
 set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
 set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
-set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
+set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
 
 set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
 set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
-set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
+set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
 
 if (OS_DARWIN)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
@@ -601,7 +605,9 @@ if (NATIVE_BUILD_TARGETS
     execute_process(
         COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )
 
     execute_process(
@@ -613,9 +619,13 @@ if (NATIVE_BUILD_TARGETS
         "-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
         ${PROJECT_SOURCE_DIR}
         WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )
 
     execute_process(
         COMMAND ${CMAKE_COMMAND} --build "${NATIVE_BUILD_DIR}" --target ${NATIVE_BUILD_TARGETS}
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )
 endif ()


@@ -51,8 +51,14 @@ if (NOT "$ENV{CFLAGS}" STREQUAL ""
 endif()
 
 # Default toolchain - this is needed to avoid dependency on OS files.
-execute_process(COMMAND uname -s OUTPUT_VARIABLE OS)
-execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCH)
+execute_process(COMMAND uname -s
+    OUTPUT_VARIABLE OS
+    COMMAND_ERROR_IS_FATAL ANY
+)
+execute_process(COMMAND uname -m
+    OUTPUT_VARIABLE ARCH
+    COMMAND_ERROR_IS_FATAL ANY
+)
 
 # By default, prefer clang on Linux
 # But note, that you still may change the compiler with -DCMAKE_C_COMPILER/-DCMAKE_CXX_COMPILER.


@@ -8,6 +8,8 @@ endif ()
 # when instantiated from JSON.cpp. Try again when libcxx(abi) and Clang are upgraded to 16.
 set (CMAKE_CXX_STANDARD 20)
 
+configure_file(GitHash.cpp.in GitHash.generated.cpp)
+
 set (SRCS
     argsToConfig.cpp
     cgroupsv2.cpp
@@ -33,6 +35,7 @@ set (SRCS
     safeExit.cpp
     throwError.cpp
     Numa.cpp
+    GitHash.generated.cpp
 )
 
 add_library (common ${SRCS})


@@ -27,27 +27,6 @@ bool cgroupsV2Enabled()
 #endif
 }
 
-bool cgroupsV2MemoryControllerEnabled()
-{
-#if defined(OS_LINUX)
-    chassert(cgroupsV2Enabled());
-    /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
-    /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
-    /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
-    fs::path cgroup_dir = cgroupV2PathOfProcess();
-    if (cgroup_dir.empty())
-        return false;
-    std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
-    if (!controllers_file.is_open())
-        return false;
-    std::string controllers;
-    std::getline(controllers_file, controllers);
-    return controllers.find("memory") != std::string::npos;
-#else
-    return false;
-#endif
-}
-
 fs::path cgroupV2PathOfProcess()
 {
 #if defined(OS_LINUX)
@@ -71,3 +50,28 @@ fs::path cgroupV2PathOfProcess()
     return {};
 #endif
 }
+
+std::optional<std::string> getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name)
+{
+#if defined(OS_LINUX)
+    if (!cgroupsV2Enabled())
+        return {};
+
+    fs::path current_cgroup = cgroupV2PathOfProcess();
+    if (current_cgroup.empty())
+        return {};
+
+    /// Return the bottom-most nested file. If there is no such file at the current
+    /// level, try again at the parent level as settings are inherited.
+    while (current_cgroup != default_cgroups_mount.parent_path())
+    {
+        const auto path = current_cgroup / file_name;
+        if (fs::exists(path))
+            return {current_cgroup};
+        current_cgroup = current_cgroup.parent_path();
+    }
+    return {};
+#else
+    return {};
+#endif
+}


@@ -1,6 +1,7 @@
 #pragma once
 
 #include <filesystem>
+#include <string_view>
 
 #if defined(OS_LINUX)
 /// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@@ -11,11 +12,11 @@ static inline const std::filesystem::path default_cgroups_mount = "/sys/fs/cgrou
 /// Is cgroups v2 enabled on the system?
 bool cgroupsV2Enabled();
 
-/// Is the memory controller of cgroups v2 enabled on the system?
-/// Assumes that cgroupsV2Enabled() is enabled.
-bool cgroupsV2MemoryControllerEnabled();
-
 /// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
 /// Returns an empty path the cgroup cannot be determined.
 /// Assumes that cgroupsV2Enabled() is enabled.
 std::filesystem::path cgroupV2PathOfProcess();
+
+/// Returns the most nested cgroup dir containing the specified file.
+/// If cgroups v2 is not enabled - returns an empty optional.
+std::optional<std::string> getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name);


@@ -19,9 +19,6 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
     if (!cgroupsV2Enabled())
         return {};
 
-    if (!cgroupsV2MemoryControllerEnabled())
-        return {};
-
     std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
     if (current_cgroup.empty())
         return {};


@@ -2,11 +2,11 @@
 # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54489)
+SET(VERSION_REVISION 54490)
 SET(VERSION_MAJOR 24)
-SET(VERSION_MINOR 8)
+SET(VERSION_MINOR 9)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af)
+SET(VERSION_GITHASH e02b434d2fc0c4fbee29ca675deab7474d274608)
-SET(VERSION_DESCRIBE v24.8.1.1-testing)
+SET(VERSION_DESCRIBE v24.9.1.1-testing)
-SET(VERSION_STRING 24.8.1.1)
+SET(VERSION_STRING 24.9.1.1)
 
 # end of autochange


@@ -9,10 +9,18 @@ endif ()
 file(GLOB bprefix "/usr/local/llvm${COMPILER_VERSION_MAJOR}/lib/clang/${COMPILER_VERSION_MAJOR}/lib/${system_processor}-portbld-freebsd*/")
 message(STATUS "-Bprefix: ${bprefix}")
 
-execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
+execute_process(COMMAND
+    ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a
+    OUTPUT_VARIABLE BUILTINS_LIBRARY
+    COMMAND_ERROR_IS_FATAL ANY
+    OUTPUT_STRIP_TRAILING_WHITESPACE)
 # --print-file-name simply prints what you passed in case of nothing was resolved, so let's try one other possible option
 if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins-${system_processor}.a")
-    execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
+    execute_process(COMMAND
+        ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a
+        OUTPUT_VARIABLE BUILTINS_LIBRARY
+        COMMAND_ERROR_IS_FATAL ANY
+        OUTPUT_STRIP_TRAILING_WHITESPACE)
 endif()
 if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins.a")
     message(FATAL_ERROR "libclang_rt.builtins had not been found")


@@ -42,19 +42,9 @@ endif ()
 # But use 2 parallel jobs, since:
 # - this is what llvm does
 # - and I've verfied that lld-11 does not use all available CPU time (in peak) while linking one binary
-if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO)
-    if (ARCH_AARCH64)
-        # aarch64 builds start to often fail with OOMs (reason not yet clear), for now let's limit the concurrency
-        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.")
-        set (PARALLEL_LINK_JOBS 1)
-        if (LINKER_NAME MATCHES "lld")
-            math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4)
-            set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}")
-        endif()
-    elseif (PARALLEL_LINK_JOBS GREATER 2)
-        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
-        set (PARALLEL_LINK_JOBS 2)
-    endif ()
+if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2)
+    message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
+    set (PARALLEL_LINK_JOBS 2)
 endif()
 
 message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).")


@ -5,7 +5,11 @@ set (DEFAULT_LIBS "-nodefaultlibs")
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
# See https://bugs.llvm.org/show_bug.cgi?id=16404
execute_process (COMMAND
    ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt
    OUTPUT_VARIABLE BUILTINS_LIBRARY
    COMMAND_ERROR_IS_FATAL ANY
    OUTPUT_STRIP_TRAILING_WHITESPACE)

# Apparently, in clang-19, the UBSan support library for C++ was moved out into ubsan_standalone_cxx.a, so we have to include both.
if (SANITIZE STREQUAL undefined)


@ -5,7 +5,11 @@ if (NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang")
endif ()

# Print details to output
execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
    OUTPUT_VARIABLE COMPILER_SELF_IDENTIFICATION
    COMMAND_ERROR_IS_FATAL ANY
    OUTPUT_STRIP_TRAILING_WHITESPACE
)
message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}")

# Require minimum compiler versions


@ -90,7 +90,10 @@ endfunction()
# Function get_cmake_properties returns list of all properties that cmake supports
function(get_cmake_properties outvar)
    execute_process(COMMAND cmake --help-property-list
        OUTPUT_VARIABLE cmake_properties
        COMMAND_ERROR_IS_FATAL ANY
    )
    # Convert command output into a CMake list
    string(REGEX REPLACE ";" "\\\\;" cmake_properties "${cmake_properties}")
    string(REGEX REPLACE "\n" ";" cmake_properties "${cmake_properties}")


@ -209,9 +209,8 @@ endif()
option(ENABLE_USEARCH "Enable USearch" ${ENABLE_LIBRARIES})
if (ENABLE_USEARCH)
    add_contrib (FP16-cmake FP16)
    add_contrib (SimSIMD-cmake SimSIMD)
    add_contrib (usearch-cmake usearch) # requires: FP16, SimdSIMD
else ()
    message(STATUS "Not using USearch")
endif ()

contrib/SimSIMD vendored

@ -1 +1 @@
Subproject commit de2cb75b9e9e3389d5e1e51fd9f8ed151f3c17cf Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26

contrib/aws vendored

@ -1 +1 @@
Subproject commit 1c2946bfcb7f1e3ae0a858de0b59d4f1a7b4ccaf Subproject commit d5450d76abda556ce145ddabe7e0cc6a7644ec59

contrib/aws-crt-cpp vendored

@ -1 +1 @@
Subproject commit f532d6abc0d2b0d8b5d6fe9e7c51eaedbe4afbd0 Subproject commit e5aa45cacfdcda7719ead38760e7c61076f5745f


@ -37,7 +37,9 @@ message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}")
execute_process(COMMAND
    bash -c "cd ${TZDIR} && find * -type f -and ! -name '*.tab' -and ! -name 'localtime' | LC_ALL=C sort | paste -sd ';' -"
    OUTPUT_STRIP_TRAILING_WHITESPACE
    OUTPUT_VARIABLE TIMEZONES
    COMMAND_ERROR_IS_FATAL ANY
)

file(APPEND ${TIMEZONES_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")
file(APPEND ${TIMEZONES_FILE} "#include <incbin.h>\n")


@ -359,7 +359,9 @@ else ()
execute_process(
    COMMAND mkdir -p ${PROTOC_BUILD_DIR}
    COMMAND_ECHO STDOUT
    COMMAND_ERROR_IS_FATAL ANY
)

execute_process(
    COMMAND ${CMAKE_COMMAND}
@ -375,11 +377,15 @@ else ()
"-DABSL_ENABLE_INSTALL=0" "-DABSL_ENABLE_INSTALL=0"
"${protobuf_source_dir}" "${protobuf_source_dir}"
WORKING_DIRECTORY "${PROTOC_BUILD_DIR}" WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
COMMAND_ECHO STDOUT) COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)
execute_process( execute_process(
COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}" COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
COMMAND_ECHO STDOUT) COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)
endif () endif ()
add_executable(protoc IMPORTED GLOBAL) add_executable(protoc IMPORTED GLOBAL)


@ -51,8 +51,9 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
set(OPENSSL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openssl-cmake")

execute_process(
    COMMAND mkdir -p ${OPENSSL_BUILD_DIR}
    COMMAND_ECHO STDOUT
    COMMAND_ERROR_IS_FATAL ANY
)

if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
@ -89,15 +90,21 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
"-DClickHouse_SOURCE_DIR=${ClickHouse_SOURCE_DIR}" "-DClickHouse_SOURCE_DIR=${ClickHouse_SOURCE_DIR}"
"${OPENSSL_SOURCE_DIR}" "${OPENSSL_SOURCE_DIR}"
WORKING_DIRECTORY "${OPENSSL_BUILD_DIR}" WORKING_DIRECTORY "${OPENSSL_BUILD_DIR}"
COMMAND_ECHO STDOUT) COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)
execute_process( execute_process(
COMMAND ${CMAKE_COMMAND} --build "${OPENSSL_BUILD_DIR}" COMMAND ${CMAKE_COMMAND} --build "${OPENSSL_BUILD_DIR}"
COMMAND_ECHO STDOUT) COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)
execute_process( execute_process(
COMMAND ${CMAKE_COMMAND} --install "${OPENSSL_BUILD_DIR}" COMMAND ${CMAKE_COMMAND} --install "${OPENSSL_BUILD_DIR}"
COMMAND_ECHO STDOUT) COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)
# It's not important on which file we depend, we just want to specify right order # It's not important on which file we depend, we just want to specify right order
add_library(openssl_for_grpc STATIC IMPORTED GLOBAL) add_library(openssl_for_grpc STATIC IMPORTED GLOBAL)
@ -108,8 +115,9 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
set (GRPC_CPP_PLUGIN_BUILD_DIR "${_gRPC_BINARY_DIR}/build")

execute_process(
    COMMAND mkdir -p ${GRPC_CPP_PLUGIN_BUILD_DIR}
    COMMAND_ECHO STDOUT
    COMMAND_ERROR_IS_FATAL ANY
)

set(abseil_source_dir "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
@ -140,11 +148,15 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
"-DgRPC_SSL_PROVIDER=package" "-DgRPC_SSL_PROVIDER=package"
"${_gRPC_SOURCE_DIR}" "${_gRPC_SOURCE_DIR}"
WORKING_DIRECTORY "${GRPC_CPP_PLUGIN_BUILD_DIR}" WORKING_DIRECTORY "${GRPC_CPP_PLUGIN_BUILD_DIR}"
COMMAND_ECHO STDOUT) COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)
execute_process( execute_process(
COMMAND ${CMAKE_COMMAND} --build "${GRPC_CPP_PLUGIN_BUILD_DIR}" COMMAND ${CMAKE_COMMAND} --build "${GRPC_CPP_PLUGIN_BUILD_DIR}"
COMMAND_ECHO STDOUT) COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)
add_executable(grpc_cpp_plugin IMPORTED GLOBAL) add_executable(grpc_cpp_plugin IMPORTED GLOBAL)
set_target_properties (grpc_cpp_plugin PROPERTIES IMPORTED_LOCATION "${GRPC_CPP_PLUGIN_BUILD_DIR}/grpc_cpp_plugin") set_target_properties (grpc_cpp_plugin PROPERTIES IMPORTED_LOCATION "${GRPC_CPP_PLUGIN_BUILD_DIR}/grpc_cpp_plugin")

contrib/icu vendored

@ -1 +1 @@
Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625 Subproject commit 4216173eeeb39c1d4caaa54a68860e800412d273

contrib/libunwind vendored

@ -1 +1 @@
Subproject commit a89d904befea07814628c6ce0b44083c4e149c62 Subproject commit 601db0b0e03018c01710470a37703b618f9cf08b

contrib/robin-map vendored

@ -1 +0,0 @@
Subproject commit 851a59e0e3063ee0e23089062090a73fd3de482d


@ -1 +0,0 @@
# See contrib/usearch-cmake/CMakeLists.txt

contrib/usearch vendored

@ -1 +1 @@
Subproject commit 30810452bec5d3d3aa0931bb5d761e2f09aa6356 Subproject commit e21a5778a0d4469ddaf38c94b7be0196bb701ee4


@ -1,5 +1,4 @@
set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16")
set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")
@ -7,8 +6,17 @@ add_library(_usearch INTERFACE)
target_include_directories(_usearch SYSTEM INTERFACE
    ${FP16_PROJECT_DIR}/include
    ${SIMSIMD_PROJECT_DIR}/include
    ${USEARCH_PROJECT_DIR}/include)

target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB)

# target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD)
# ^^ simsimd is not enabled at the moment. Reasons:
# - Vectorization is important for raw scans but not so much for HNSW. We use usearch only for HNSW.
# - Simsimd does compile-time dispatch (choice of SIMD kernels determined by capabilities of the build machine) or dynamic dispatch (SIMD
#   kernels chosen at runtime based on cpuid instruction). Since current builds are limited to SSE 4.2 (x86) and NEON (ARM), the speedup of
#   the former would be moderate compared to AVX-512 / SVE. The latter is at the moment too fragile with respect to portability across x86
#   and ARM machines ... certain combinations of quantizations / distance functions / SIMD instructions are not implemented at the moment.

add_library(ch_contrib::usearch ALIAS _usearch)


@ -47,8 +47,7 @@
"docker/test/stateful": { "docker/test/stateful": {
"name": "clickhouse/stateful-test", "name": "clickhouse/stateful-test",
"dependent": [ "dependent": [
"docker/test/stress", "docker/test/stress"
"docker/test/upgrade"
] ]
}, },
"docker/test/unit": { "docker/test/unit": {
@ -59,10 +58,6 @@
"name": "clickhouse/stress-test", "name": "clickhouse/stress-test",
"dependent": [] "dependent": []
}, },
"docker/test/upgrade": {
"name": "clickhouse/upgrade-check",
"dependent": []
},
"docker/test/integration/runner": { "docker/test/integration/runner": {
"name": "clickhouse/integration-tests-runner", "name": "clickhouse/integration-tests-runner",
"dependent": [] "dependent": []


@ -93,6 +93,3 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ENV COMMIT_SHA=''
ENV PULL_REQUEST_NUMBER=''
ENV COPY_CLICKHOUSE_BINARY_TO_OUTPUT=0
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]


@ -35,7 +35,6 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \
ENV TZ=Europe/Amsterdam
ENV MAX_RUN_TIME=9000
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

ARG sqllogic_test_repo="https://github.com/gregrahn/sqllogictest.git"


@ -94,7 +94,7 @@ function run_tests()
export -f run_tests

run_tests

#/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv


@ -22,7 +22,6 @@ ARG sqltest_repo="https://github.com/elliotchance/sqltest/"
RUN git clone ${sqltest_repo}

ENV TZ=UTC
ENV MAX_RUN_TIME=900
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

COPY run.sh /


@ -10,7 +10,3 @@ RUN apt-get update -y \
    npm \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
COPY create.sql /
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]


@ -1 +0,0 @@
../stateless/setup_minio.sh


@ -65,7 +65,6 @@ ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

ENV NUM_TRIES=1
ENV MAX_RUN_TIME=0

# Unrelated to vars in setup_minio.sh, but should be the same there
# to have the same binaries for local running scenario
@ -86,18 +85,6 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo
ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
ENV EXPORT_S3_STORAGE_POLICIES=1
ENV CLICKHOUSE_GRPC_CLIENT="/usr/share/clickhouse-utils/grpc-client/clickhouse-grpc-client.py"

RUN npm install -g azurite@3.30.0 \
    && npm install -g tslib && npm install -g node
COPY run.sh /
COPY setup_minio.sh /
COPY setup_hdfs_minicluster.sh /
COPY attach_gdb.lib /
COPY utils.lib /
# We store stress_tests.lib in stateless image to avoid duplication of this file in stress and upgrade tests
COPY stress_tests.lib /
CMD ["/bin/bash", "/run.sh"]


@ -22,8 +22,5 @@ RUN apt-get update -y \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY run.sh /

ENV EXPORT_S3_STORAGE_POLICIES=1
CMD ["/bin/bash", "/run.sh"]


@ -1,29 +0,0 @@
# rebuild in #33610
# docker build -t clickhouse/upgrade-check .
ARG FROM_TAG=latest
FROM clickhouse/stateful-test:$FROM_TAG
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \
bash \
tzdata \
parallel \
expect \
python3 \
python3-lxml \
python3-termcolor \
python3-requests \
curl \
sudo \
openssl \
netcat-openbsd \
brotli \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
COPY run.sh /
ENV EXPORT_S3_STORAGE_POLICIES=1
CMD ["/bin/bash", "/run.sh"]


@ -56,7 +56,5 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY process_functional_tests_result.py /

COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"


@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.7.30-lts (c8a28cf4331) FIXME as compared to v24.3.6.48-lts (b2d33c3c45d)
#### Improvement
* Backported in [#68103](https://github.com/ClickHouse/ClickHouse/issues/68103): Distinguish booleans and integers while parsing values for custom settings: ``` SET custom_a = true; SET custom_b = 1; ```. [#62206](https://github.com/ClickHouse/ClickHouse/pull/62206) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67931](https://github.com/ClickHouse/ClickHouse/issues/67931): Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68062](https://github.com/ClickHouse/ClickHouse/issues/68062): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#67812](https://github.com/ClickHouse/ClickHouse/issues/67812): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67848](https://github.com/ClickHouse/ClickHouse/issues/67848): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68271](https://github.com/ClickHouse/ClickHouse/issues/68271): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67806](https://github.com/ClickHouse/ClickHouse/issues/67806): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67834](https://github.com/ClickHouse/ClickHouse/issues/67834): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#68206](https://github.com/ClickHouse/ClickHouse/issues/68206): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68089](https://github.com/ClickHouse/ClickHouse/issues/68089): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68120](https://github.com/ClickHouse/ClickHouse/issues/68120): Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Update version after release. [#67676](https://github.com/ClickHouse/ClickHouse/pull/67676) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68074](https://github.com/ClickHouse/ClickHouse/issues/68074): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).


@ -0,0 +1,16 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.8.13-lts (84bbfc70f5d) FIXME as compared to v24.3.7.30-lts (c8a28cf4331)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68562](https://github.com/ClickHouse/ClickHouse/issues/68562): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68114](https://github.com/ClickHouse/ClickHouse/issues/68114): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67989](https://github.com/ClickHouse/ClickHouse/issues/67989): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68335](https://github.com/ClickHouse/ClickHouse/issues/68335): Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68392](https://github.com/ClickHouse/ClickHouse/issues/68392): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).


@ -240,7 +240,7 @@ libhdfs3 support HDFS namenode HA.
## Storage Settings {#storage-settings}

- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.

**See Also**


@ -225,7 +225,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
## Storage Settings {#storage-settings}

- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.

## S3-related Settings {#settings}


@ -22,10 +22,10 @@ ORDER BY Distance(vectors, Point)
LIMIT N
```

`vectors` contains N-dimensional values of type [Array(Float32)](../../../sql-reference/data-types/array.md) or Array(Float64), for example
embeddings. Function `Distance` computes the distance between two vectors. Often, the Euclidean (L2) distance is chosen as distance function
but [other distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point,
e.g. `(0.17, 0.33, ...)`, and `N` limits the number of search results.

This query returns the top-`N` closest points to the reference point. Parameter `N` limits the number of returned values which is useful for
situations where `MaxDistance` is difficult to determine in advance.
@ -59,6 +59,8 @@ Parameters:
- `ef_construction`: (optional, default: 128)
- `ef_search`: (optional, default: 64)

Value 0 for parameters `m`, `ef_construction`, and `ef_search` refers to the default value.

Example:

```sql


@ -359,13 +359,14 @@ DESC format(JSONEachRow, '{"int" : 42, "float" : 42.42, "string" : "Hello, World
Dates, DateTimes:

```sql
DESC format(JSONEachRow, '{"date" : "2022-01-01", "datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"}')
```
```response
┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ date       │ Nullable(Date)          │              │                    │         │                  │                │
│ datetime   │ Nullable(DateTime)      │              │                    │         │                  │                │
│ datetime64 │ Nullable(DateTime64(9)) │              │                    │         │                  │                │
└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Arrays:
@ -759,12 +760,13 @@ DESC format(CSV, 'Hello world!,World hello!')
Dates, DateTimes:

```sql
DESC format(CSV, '"2020-01-01","2020-01-01 00:00:00","2022-01-01 00:00:00.000"')
```
```response
┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ c1   │ Nullable(Date)          │              │                    │         │                  │                │
│ c2   │ Nullable(DateTime)      │              │                    │         │                  │                │
│ c3   │ Nullable(DateTime64(9)) │              │                    │         │                  │                │
└──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
@ -956,12 +958,13 @@ DESC format(TSKV, 'int=42 float=42.42 bool=true string=Hello,World!\n')
Dates, DateTimes:

```sql
DESC format(TSV, '2020-01-01 2020-01-01 00:00:00 2022-01-01 00:00:00.000')
```
```response
┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ c1   │ Nullable(Date)          │              │                    │         │                  │                │
│ c2   │ Nullable(DateTime)      │              │                    │         │                  │                │
│ c3   │ Nullable(DateTime64(9)) │              │                    │         │                  │                │
└──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
@ -1126,12 +1129,13 @@ DESC format(Values, $$(42, 42.42, true, 'Hello,World!')$$)
Dates, DateTimes:

```sql
DESC format(Values, $$('2020-01-01', '2020-01-01 00:00:00', '2022-01-01 00:00:00.000')$$)
```
```response
┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ c1   │ Nullable(Date)          │              │                    │         │                  │                │
│ c2   │ Nullable(DateTime)      │              │                    │         │                  │                │
│ c3   │ Nullable(DateTime64(9)) │              │                    │         │                  │                │
└──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
@ -1504,8 +1508,8 @@ DESC format(JSONEachRow, $$
#### input_format_try_infer_datetimes

If enabled, ClickHouse will try to infer type `DateTime` or `DateTime64` from string fields in schema inference for text formats.
If all fields from a column in sample data were successfully parsed as datetimes, the result type will be `DateTime` or `DateTime64(9)` (if any datetime had fractional part),
if at least one field was not parsed as datetime, the result type will be `String`.

Enabled by default.
@ -1513,39 +1517,66 @@ Enabled by default.
**Examples**

```sql
SET input_format_try_infer_datetimes = 0;
DESC format(JSONEachRow, $$
{"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"}
{"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"}
$$)
```
```response
┌─name───────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime   │ Nullable(String) │              │                    │         │                  │                │
│ datetime64 │ Nullable(String) │              │                    │         │                  │                │
└────────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

```sql
SET input_format_try_infer_datetimes = 1;
DESC format(JSONEachRow, $$
{"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"}
{"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"}
$$)
```
```response
┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime   │ Nullable(DateTime)      │              │                    │         │                  │                │
│ datetime64 │ Nullable(DateTime64(9)) │              │                    │         │                  │                │
└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

```sql
DESC format(JSONEachRow, $$
{"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"}
{"datetime" : "unknown", "datetime64" : "unknown"}
$$)
```
```response
┌─name───────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime   │ Nullable(String) │              │                    │         │                  │                │
│ datetime64 │ Nullable(String) │              │                    │         │                  │                │
└────────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
#### input_format_try_infer_datetimes_only_datetime64
If enabled, ClickHouse will always infer `DateTime64(9)` when `input_format_try_infer_datetimes` is enabled even if datetime values don't contain fractional part.
Disabled by default.
**Examples**
```sql
SET input_format_try_infer_datetimes = 1;
SET input_format_try_infer_datetimes_only_datetime64 = 1;
DESC format(JSONEachRow, $$
{"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"}
{"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"}
$$)
```
```text
┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ datetime │ Nullable(DateTime64(9)) │ │ │ │ │ │
│ datetime64 │ Nullable(DateTime64(9)) │ │ │ │ │ │
└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Note: Parsing datetimes during schema inference respects the setting [date_time_input_format](/docs/en/operations/settings/settings-formats.md#date_time_input_format)


@ -10,7 +10,7 @@ sidebar_label: Visual Interfaces
### ch-ui {#ch-ui}

[ch-ui](https://github.com/caioricciuti/ch-ui) is a simple React.js app interface for ClickHouse databases designed for executing queries and visualizing data. Built with React and the ClickHouse client for web, it offers a sleek and user-friendly UI for easy database interactions.

Features:
@ -25,7 +25,7 @@ Web interface for ClickHouse in the [Tabix](https://github.com/tabixio/tabix) pr
Features:

- Works with ClickHouse directly from the browser without the need to install additional software.
- Query editor with syntax highlighting.
- Auto-completion of commands.
- Tools for graphical analysis of query execution.
@ -63,7 +63,7 @@ Features:
- Table list with filtering and metadata.
- Table preview with filtering and sorting.
- Read-only query execution.

### Redash {#redash}
@ -75,23 +75,23 @@ Features:
- Powerful editor of queries.
- Database explorer.
- Visualization tool that allows you to represent data in different forms.

### Grafana {#grafana}

[Grafana](https://grafana.com/grafana/plugins/grafana-clickhouse-datasource/) is a platform for monitoring and visualization.

"Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data-driven culture. Trusted and loved by the community" &mdash; grafana.com.

ClickHouse data source plugin provides support for ClickHouse as a backend database.

### qryn {#qryn}

[qryn](https://metrico.in) is a polyglot, high-performance observability stack for ClickHouse _(formerly cLoki)_ with native Grafana integrations allowing users to ingest and analyze logs, metrics and telemetry traces from any agent supporting Loki/LogQL, Prometheus/PromQL, OTLP/Tempo, Elastic, InfluxDB and many more.

Features:

- Built-in Explore UI and LogQL CLI for querying, extracting and visualizing data
- Native Grafana APIs support for querying, processing, ingesting, tracing and alerting without plugins
- Powerful pipeline to dynamically search, filter and extract data from logs, events, traces and beyond
- Ingestion and PUSH APIs transparently compatible with LogQL, PromQL, InfluxDB, Elastic and many more
@ -139,7 +139,7 @@ Features:
### DBM {#dbm}

[DBM](https://github.com/devlive-community/dbm) is a visual management tool for ClickHouse!

Features:
@ -151,7 +151,7 @@ Features:
- Support custom query
- Support multiple data sources management (connection test, monitoring)
- Support monitor (processor, connection, query)
- Support migrating data

### Bytebase {#bytebase}
@ -169,7 +169,7 @@ Features:
### Zeppelin-Interpreter-for-ClickHouse {#zeppelin-interpreter-for-clickhouse}

[Zeppelin-Interpreter-for-ClickHouse](https://github.com/SiderZhang/Zeppelin-Interpreter-for-ClickHouse) is a [Zeppelin](https://zeppelin.apache.org) interpreter for ClickHouse. Compared with the JDBC interpreter, it can provide better timeout control for long-running queries.

### ClickCat {#clickcat}
@ -179,7 +179,7 @@ Features:
- An online SQL editor which can run your SQL code without any installation.
- You can observe all processes and mutations. For those unfinished processes, you can kill them in the UI.
- The Metrics contain Cluster Analysis, Data Analysis, and Query Analysis.

### ClickVisual {#clickvisual}
@ -332,7 +332,7 @@ Learn more about the product at [TABLUM.IO](https://tablum.io/)
### CKMAN {#ckman}

[CKMAN](https://www.github.com/housepower/ckman) is a tool for managing and monitoring ClickHouse clusters!

Features:


@ -307,8 +307,22 @@ SELECT dictGet('dict', 'B', 2);
## Named collections for accessing PostgreSQL database

For the description of parameters see [postgresql](../sql-reference/table-functions/postgresql.md). Additionally, there are aliases:
- `username` for `user`
- `db` for `database`.
Parameter `addresses_expr` is used in a collection instead of `host:port`. The parameter is optional, because there are other optional ones: `host`, `hostname`, `port`. The following pseudo code explains the priority:
```sql
CASE
WHEN collection['addresses_expr'] != '' THEN collection['addresses_expr']
WHEN collection['host'] != '' THEN collection['host'] || ':' || if(collection['port'] != '', collection['port'], '5432')
WHEN collection['hostname'] != '' THEN collection['hostname'] || ':' || if(collection['port'] != '', collection['port'], '5432')
END
```
Example of creation:
```sql
CREATE NAMED COLLECTION mypg AS
user = 'pguser',
@ -316,7 +330,7 @@ password = 'jw8s0F4',
host = '127.0.0.1',
port = 5432,
database = 'test',
schema = 'test_schema'
```

Example of configuration:
@ -369,6 +383,10 @@ SELECT * FROM mypgtable;
└───┘
```
:::note
PostgreSQL copies data from the named collection when the table is being created. A change in the collection does not affect the existing tables.
:::
### Example of using named collections with database with engine PostgreSQL

```sql


@ -1041,3 +1041,27 @@ Compression rates of LZ4 or ZSTD improve on average by 20-40%.
This setting works best for tables with no primary key or a low-cardinality primary key, i.e. a table with only a few distinct primary key values.
High-cardinality primary keys, e.g. involving timestamp columns of type `DateTime64`, are not expected to benefit from this setting.

## lightweight_mutation_projection_mode

By default, lightweight `DELETE` does not work for tables with projections, because rows in a projection may be affected by a `DELETE` operation; hence the default value is `throw`.
However, this setting can change the behavior. With the value `drop` or `rebuild`, deletes work with projections: `drop` removes the projection, so the current query may be fast because the projection is simply deleted, but future queries are slower because no projection is attached; `rebuild` rebuilds the projection, which may affect the performance of the current query but can speed up future queries. Importantly, both options act only at the part level: projections in parts that are not touched by the delete stay intact and are neither dropped nor rebuilt.

Possible values:

- `throw`, `drop`, `rebuild`

Default value: `throw`
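A minimal sketch of using this mode per query; the table and projection names are hypothetical, and the query-level `SETTINGS` clause is assumed to accept this setting:

```sql
-- Hypothetical table with a projection; a lightweight DELETE would throw by default.
CREATE TABLE events
(
    id UInt64,
    user_id UInt64,
    PROJECTION by_user (SELECT user_id, count() GROUP BY user_id)
)
ENGINE = MergeTree
ORDER BY id;

-- Allow the delete by rebuilding the projection, but only in the parts the delete touches.
DELETE FROM events WHERE user_id = 42
SETTINGS lightweight_mutation_projection_mode = 'rebuild';
```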
## deduplicate_merge_projection_mode

Whether to allow creating a projection for a table with a non-classic MergeTree engine, that is, not a (Replicated, Shared) MergeTree. If allowed, it also determines the action when projections are merged: either drop or rebuild them. Classic MergeTree ignores this setting. It also controls `OPTIMIZE DEDUPLICATE`, in which case it has effect on all MergeTree family members. Like `lightweight_mutation_projection_mode`, it works at the part level.

Possible values:

- `throw`, `drop`, `rebuild`

Default value: `throw`
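For illustration, a sketch of supplying the mode as a table-level setting when creating a non-classic MergeTree table with a projection (the table name is hypothetical):

```sql
CREATE TABLE dedup_example
(
    id UInt64,
    val String,
    PROJECTION p (SELECT id, val ORDER BY val)
)
ENGINE = ReplacingMergeTree
ORDER BY id
SETTINGS deduplicate_merge_projection_mode = 'rebuild';
```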


@ -1381,7 +1381,7 @@ Default value: `2`.
Close connection before returning connection to the pool.

Default value: false.

## odbc_bridge_connection_pool_size {#odbc-bridge-connection-pool-size}
@ -5620,6 +5620,19 @@ Minimal size of block to compress in CROSS JOIN. Zero value means - disable this
Default value: `1GiB`.
## use_json_alias_for_old_object_type
When enabled, `JSON` data type alias will be used to create an old [Object('json')](../../sql-reference/data-types/json.md) type instead of the new [JSON](../../sql-reference/data-types/newjson.md) type.
This setting requires server restart to take effect when changed.
Default value: `false`.
## type_json_skip_duplicated_paths
When enabled, ClickHouse will skip duplicated paths during parsing of a [JSON](../../sql-reference/data-types/newjson.md) object. Only the value of the first occurrence of each path will be inserted.

Default value: `false`.
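A small sketch of the effect; `allow_experimental_json_type` is assumed here to be the gate for the new `JSON` type and is not documented on this page:

```sql
SET allow_experimental_json_type = 1, type_json_skip_duplicated_paths = 1;

-- Only the value of the first occurrence of the duplicated path "a" is kept.
SELECT '{"a" : 1, "a" : 2}'::JSON AS json;
```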
## restore_replace_external_engines_to_null

For testing purposes. Replaces all external engines to Null to not initiate external connections.
@ -5654,3 +5667,9 @@ Possible values:
- 1 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is enabled.

Default value: `0`.
## create_if_not_exists
Enable `IF NOT EXISTS` for `CREATE` statement by default. If either this setting or `IF NOT EXISTS` is specified and a table with the provided name already exists, no exception will be thrown.
Default value: `false`.
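For example (a sketch with a throwaway table name):

```sql
SET create_if_not_exists = 1;

CREATE TABLE t (x UInt8) ENGINE = Memory;
-- With the setting enabled, repeating the statement is a no-op instead of an error.
CREATE TABLE t (x UInt8) ENGINE = Memory;
```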


@ -17,7 +17,8 @@ Columns:
- `duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How long the last refresh attempt took.
- `next_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time at which the next refresh is scheduled to start.
- `remaining_dependencies` ([Array(String)](../../sql-reference/data-types/array.md)) — If the view has [refresh dependencies](../../sql-reference/statements/create/view.md#refresh-dependencies), this array contains the subset of those dependencies that are not satisfied for the current refresh yet. If `status = 'WaitingForDependencies'`, a refresh is ready to start as soon as these dependencies are fulfilled.
- `exception` ([String](../../sql-reference/data-types/string.md)) — If `last_refresh_result = 'Error'`, i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace.
- `retry` ([UInt64](../../sql-reference/data-types/int-uint.md)) — If nonzero, the current or next refresh is a retry (see `refresh_retries` refresh setting), and `retry` is the 1-based index of that retry.
- `refresh_count` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of successful refreshes since last server restart or table creation.
- `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far.


@ -12,57 +12,59 @@ This specification describes the binary format that can be used for binary encod
The table below describes how each data type is represented in binary format. Each data type encoding consists of 1 byte that indicates the type and some optional additional information.
`var_uint` in the binary encoding means that the size is encoded using Variable-Length Quantity compression.
| ClickHouse data type | Binary encoding |
|----------------------|-----------------|
| `Nothing` | `0x00` |
| `UInt8` | `0x01` |
| `UInt16` | `0x02` |
| `UInt32` | `0x03` |
| `UInt64` | `0x04` |
| `UInt128` | `0x05` |
| `UInt256` | `0x06` |
| `Int8` | `0x07` |
| `Int16` | `0x08` |
| `Int32` | `0x09` |
| `Int64` | `0x0A` |
| `Int128` | `0x0B` |
| `Int256` | `0x0C` |
| `Float32` | `0x0D` |
| `Float64` | `0x0E` |
| `Date` | `0x0F` |
| `Date32` | `0x10` |
| `DateTime` | `0x11` |
| `DateTime(time_zone)` | `0x12<var_uint_time_zone_name_size><time_zone_name_data>` |
| `DateTime64(P)` | `0x13<uint8_precision>` |
| `DateTime64(P, time_zone)` | `0x14<uint8_precision><var_uint_time_zone_name_size><time_zone_name_data>` |
| `String` | `0x15` |
| `FixedString(N)` | `0x16<var_uint_size>` |
| `Enum8` | `0x17<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int8_value_1>...<var_uint_name_size_N><name_data_N><int8_value_N>` |
| `Enum16` | `0x18<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int16_little_endian_value_1>...><var_uint_name_size_N><name_data_N><int16_little_endian_value_N>` |
| `Decimal32(P, S)` | `0x19<uint8_precision><uint8_scale>` |
| `Decimal64(P, S)` | `0x1A<uint8_precision><uint8_scale>` |
| `Decimal128(P, S)` | `0x1B<uint8_precision><uint8_scale>` |
| `Decimal256(P, S)` | `0x1C<uint8_precision><uint8_scale>` |
| `UUID` | `0x1D` |
| `Array(T)` | `0x1E<nested_type_encoding>` |
| `Tuple(T1, ..., TN)` | `0x1F<var_uint_number_of_elements><nested_type_encoding_1>...<nested_type_encoding_N>` |
| `Tuple(name1 T1, ..., nameN TN)` | `0x20<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
| `Set` | `0x21` |
| `Interval` | `0x22<interval_kind>` (see [interval kind binary encoding](#interval-kind-binary-encoding)) |
| `Nullable(T)` | `0x23<nested_type_encoding>` |
| `Function` | `0x24<var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N><return_type_encoding>` |
| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25<var_uint_version><var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
| `LowCardinality(T)` | `0x26<nested_type_encoding>` |
| `Map(K, V)` | `0x27<key_type_encoding><value_type_encoding>` |
| `IPv4` | `0x28` |
| `IPv6` | `0x29` |
| `Variant(T1, ..., TN)` | `0x2A<var_uint_number_of_variants><variant_type_encoding_1>...<variant_type_encoding_N>` |
| `Dynamic(max_types=N)` | `0x2B<uint8_max_types>` |
| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C<var_uint_type_name_size><type_name_data>` |
| `Bool` | `0x2D` |
| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E<var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
| `Nested(name1 T1, ..., nameN TN)` | `0x2F<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
| `JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP REGEXP skip_path_regexp)` | `0x30<uint8_serialization_version><var_int_max_dynamic_paths><uint8_max_dynamic_types><var_uint_number_of_typed_paths><var_uint_path_name_size_1><path_name_data_1><encoded_type_1>...<var_uint_number_of_skip_paths><var_uint_skip_path_size_1><skip_path_data_1>...<var_uint_number_of_skip_path_regexps><var_uint_skip_path_regexp_size_1><skip_path_data_regexp_1>...` |
For the `JSON` type, the byte `uint8_serialization_version` indicates the version of the serialization. Currently the version is always 0, but it can change in the future if new arguments are introduced for the `JSON` type.
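For example, composing entries from the table above, the type `Map(String, Nullable(Array(Bool)))` is encoded as the byte sequence `0x27 0x15 0x23 0x1E 0x2D`: `0x27` for `Map`, `0x15` for its `String` key type, then `0x23`, `0x1E` and `0x2D` for the `Nullable(Array(Bool))` value type.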
### Interval kind binary encoding ### Interval kind binary encoding

View File

@ -14,7 +14,7 @@ To declare a column of `Dynamic` type, use the following syntax:
<column_name> Dynamic(max_types=N) <column_name> Dynamic(max_types=N)
``` ```
Where `N` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_types` is `32`. Where `N` is an optional parameter between `0` and `254` indicating how many different data types can be stored as separate subcolumns inside a column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all values with new types will be stored together in a special shared data structure in binary form. Default value of `max_types` is `32`.
:::note :::note
The Dynamic data type is an experimental feature. To use it, set `allow_experimental_dynamic_type = 1`. The Dynamic data type is an experimental feature. To use it, set `allow_experimental_dynamic_type = 1`.
@ -224,41 +224,43 @@ SELECT d::Dynamic(max_types=5) as d2, dynamicType(d2) FROM test;
└───────┴────────────────┘ └───────┴────────────────┘
``` ```
If `K < N`, then the values with the rarest types are converted to `String`: If `K < N`, then the values with the rarest types will be inserted into a single special subcolumn, but still will be accessible:
```text ```text
CREATE TABLE test (d Dynamic(max_types=4)) ENGINE = Memory; CREATE TABLE test (d Dynamic(max_types=4)) ENGINE = Memory;
INSERT INTO test VALUES (NULL), (42), (43), ('42.42'), (true), ([1, 2, 3]); INSERT INTO test VALUES (NULL), (42), (43), ('42.42'), (true), ([1, 2, 3]);
SELECT d, dynamicType(d), d::Dynamic(max_types=2) as d2, dynamicType(d2) FROM test; SELECT d, dynamicType(d), d::Dynamic(max_types=2) as d2, dynamicType(d2), isDynamicElementInSharedData(d2) FROM test;
``` ```
```text ```text
┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┐ ┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┬─isDynamicElementInSharedData(d2)─
│ ᴺᵁᴸᴸ │ None │ ᴺᵁᴸᴸ │ None │ │ ᴺᵁᴸᴸ │ None │ ᴺᵁᴸᴸ │ None │ false │
│ 42 │ Int64 │ 42 │ Int64 │ │ 42 │ Int64 │ 42 │ Int64 │ false │
│ 43 │ Int64 │ 43 │ Int64 │ │ 43 │ Int64 │ 43 │ Int64 │ false │
│ 42.42 │ String │ 42.42 │ String │ │ 42.42 │ String │ 42.42 │ String │ false │
│ true │ Bool │ true │ String │ true │ Bool │ true │ Bool │ true
│ [1,2,3] │ Array(Int64) │ [1,2,3] │ String │ [1,2,3] │ Array(Int64) │ [1,2,3] │ Array(Int64) │ true
└─────────┴────────────────┴─────────┴─────────────────┘ └─────────┴────────────────┴─────────┴─────────────────┴──────────────────────────────────
``` ```
If `K=1`, all types are converted to `String`: The function `isDynamicElementInSharedData` returns `true` for rows that are stored in the special shared data structure inside `Dynamic`; as we can see, the resulting column contains only 2 types that are not stored in the shared data structure.
If `K=0`, all types will be inserted into a single special subcolumn:
```text ```text
CREATE TABLE test (d Dynamic(max_types=4)) ENGINE = Memory; CREATE TABLE test (d Dynamic(max_types=4)) ENGINE = Memory;
INSERT INTO test VALUES (NULL), (42), (43), ('42.42'), (true), ([1, 2, 3]); INSERT INTO test VALUES (NULL), (42), (43), ('42.42'), (true), ([1, 2, 3]);
SELECT d, dynamicType(d), d::Dynamic(max_types=1) as d2, dynamicType(d2) FROM test; SELECT d, dynamicType(d), d::Dynamic(max_types=0) as d2, dynamicType(d2), isDynamicElementInSharedData(d2) FROM test;
``` ```
```text ```text
┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┐ ┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┬─isDynamicElementInSharedData(d2)─
│ ᴺᵁᴸᴸ │ None │ ᴺᵁᴸᴸ │ None │ │ ᴺᵁᴸᴸ │ None │ ᴺᵁᴸᴸ │ None │ false │
│ 42 │ Int64 │ 42 │ String │ 42 │ Int64 │ 42 │ Int64 │ true
│ 43 │ Int64 │ 43 │ String │ 43 │ Int64 │ 43 │ Int64 │ true
│ 42.42 │ String │ 42.42 │ String │ │ 42.42 │ String │ 42.42 │ String │ true │
│ true │ Bool │ true │ String │ true │ Bool │ true │ Bool │ true
│ [1,2,3] │ Array(Int64) │ [1,2,3] │ String │ [1,2,3] │ Array(Int64) │ [1,2,3] │ Array(Int64) │ true
└─────────┴────────────────┴─────────┴─────────────────┘ └─────────┴────────────────┴─────────┴─────────────────┴──────────────────────────────────
``` ```
## Reading Dynamic type from the data ## Reading Dynamic type from the data
@ -411,17 +413,17 @@ SELECT d, dynamicType(d) FROM test ORDER by d;
## Reaching the limit in number of different data types stored inside Dynamic ## Reaching the limit in number of different data types stored inside Dynamic
`Dynamic` data type can store only limited number of different data types inside. By default, this limit is 32, but you can change it in type declaration using syntax `Dynamic(max_types=N)` where N is between 1 and 255 (due to implementation details, it's impossible to have more than 255 different data types inside Dynamic). `Dynamic` data type can store only limited number of different data types as separate subcolumns. By default, this limit is 32, but you can change it in type declaration using syntax `Dynamic(max_types=N)` where N is between 0 and 254 (due to implementation details, it's impossible to have more than 254 different data types that can be stored as separate subcolumns inside Dynamic).
When the limit is reached, all new data types inserted to `Dynamic` column will be casted to `String` and stored as `String` values. When the limit is reached, all new data types inserted to `Dynamic` column will be inserted into a single shared data structure that stores values with different data types in binary form.
Let's see what happens when the limit is reached in different scenarios. Let's see what happens when the limit is reached in different scenarios.
### Reaching the limit during data parsing ### Reaching the limit during data parsing
During parsing of `Dynamic` values from the data, when the limit is reached for current block of data, all new values will be inserted as `String` values: During parsing of `Dynamic` values from the data, when the limit is reached for current block of data, all new values will be inserted into shared data structure:
```sql ```sql
SELECT d, dynamicType(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', ' SELECT d, dynamicType(d), isDynamicElementInSharedData(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', '
{"d" : 42} {"d" : 42}
{"d" : [1, 2, 3]} {"d" : [1, 2, 3]}
{"d" : "Hello, World!"} {"d" : "Hello, World!"}
@ -432,22 +434,22 @@ SELECT d, dynamicType(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', '
``` ```
```text ```text
┌─d──────────────────────────┬─dynamicType(d)─┐ ┌─d──────────────────────┬─dynamicType(d)─────────────────┬─isDynamicElementInSharedData(d)─┐
│ 42 │ Int64 │ │ 42 │ Int64 │ false
│ [1,2,3] │ Array(Int64) │ │ [1,2,3] │ Array(Int64) │ false
│ Hello, World! │ String │ │ Hello, World! │ String │ false
│ 2020-01-01 │ String │ 2020-01-01 │ Date │ true
│ ["str1", "str2", "str3"] │ String │ ['str1','str2','str3'] │ Array(String) │ true
{"a" : 1, "b" : [1, 2, 3]} │ String (1,[1,2,3]) │ Tuple(a Int64, b Array(Int64)) │ true
└────────────────────────────┴────────────────┘ └────────────────────────┴────────────────────────────────┴─────────────────────────────────┘
``` ```
As we can see, after inserting 3 different data types `Int64`, `Array(Int64)` and `String` all new types were converted to `String`. As we can see, after inserting 3 different data types `Int64`, `Array(Int64)` and `String` all new types were inserted into special shared data structure.
### During merges of data parts in MergeTree table engines ### During merges of data parts in MergeTree table engines
During merge of several data parts in MergeTree table the `Dynamic` column in the resulting data part can reach the limit of different data types inside and won't be able to store all types from source parts. During merge of several data parts in MergeTree table the `Dynamic` column in the resulting data part can reach the limit of different data types that can be stored in separate subcolumns inside and won't be able to store all types as subcolumns from source parts.
In this case ClickHouse chooses what types will remain after merge and what types will be casted to `String`. In most cases ClickHouse tries to keep the most frequent types and cast the rarest types to `String`, but it depends on the implementation. In this case ClickHouse chooses what types will remain as separate subcolumns after merge and what types will be inserted into shared data structure. In most cases ClickHouse tries to keep the most frequent types and store the rarest types in shared data structure, but it depends on the implementation.
Let's see an example of such merge. First, let's create a table with `Dynamic` column, set the limit of different data types to `3` and insert values with `5` different types: Let's see an example of such merge. First, let's create a table with `Dynamic` column, set the limit of different data types to `3` and insert values with `5` different types:
@ -463,17 +465,17 @@ INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(1);
Each insert will create a separate data part with a `Dynamic` column containing a single type: Each insert will create a separate data part with a `Dynamic` column containing a single type:
```sql ```sql
SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part; SELECT count(), dynamicType(d), isDynamicElementInSharedData(d), _part FROM test GROUP BY _part, dynamicType(d), isDynamicElementInSharedData(d) ORDER BY _part, count();
``` ```
```text ```text
┌─count()─┬─dynamicType(d)──────┬─_part─────┐ ┌─count()─┬─dynamicType(d)──────┬─isDynamicElementInSharedData(d)─┬─_part─────┐
│ 5 │ UInt64 │ all_1_1_0 │ │ 5 │ UInt64 │ false │ all_1_1_0 │
│ 4 │ Array(UInt64) │ all_2_2_0 │ │ 4 │ Array(UInt64) │ false │ all_2_2_0 │
│ 3 │ Date │ all_3_3_0 │ │ 3 │ Date │ false │ all_3_3_0 │
│ 2 │ Map(UInt64, UInt64) │ all_4_4_0 │ │ 2 │ Map(UInt64, UInt64) │ false │ all_4_4_0 │
│ 1 │ String │ all_5_5_0 │ │ 1 │ String │ false │ all_5_5_0 │
└─────────┴─────────────────────┴───────────┘ └─────────┴─────────────────────┴─────────────────────────────────┴───────────
``` ```
Now, let's merge all parts into one and see what will happen: Now, let's merge all parts into one and see what will happen:
@ -481,18 +483,20 @@ Now, let's merge all parts into one and see what will happen:
```sql ```sql
SYSTEM START MERGES test; SYSTEM START MERGES test;
OPTIMIZE TABLE test FINAL; OPTIMIZE TABLE test FINAL;
SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part; SELECT count(), dynamicType(d), isDynamicElementInSharedData(d), _part FROM test GROUP BY _part, dynamicType(d), isDynamicElementInSharedData(d) ORDER BY _part, count() desc;
``` ```
```text ```text
┌─count()─┬─dynamicType(d)─┬─_part─────┐ ┌─count()─┬─dynamicType(d)──────┬─isDynamicElementInSharedData(d)─┬─_part─────┐
│ 5 │ UInt64 │ all_1_5_2 │ │ 5 │ UInt64 │ false │ all_1_5_2 │
│ 6 │ String │ all_1_5_2 │ │ 4 │ Array(UInt64) │ false │ all_1_5_2 │
│ 4 │ Array(UInt64) │ all_1_5_2 │ │ 3 │ Date │ false │ all_1_5_2 │
└─────────┴────────────────┴───────────┘ │ 2 │ Map(UInt64, UInt64) │ true │ all_1_5_2 │
│ 1 │ String │ true │ all_1_5_2 │
└─────────┴─────────────────────┴─────────────────────────────────┴───────────┘
``` ```
As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and casted all other types to `String`. As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` as subcolumns and inserted all other types into shared data.
## JSONExtract functions with Dynamic ## JSONExtract functions with Dynamic
@ -509,22 +513,23 @@ SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Dynamic') AS dynamic, dynamicType(
``` ```
```sql ```sql
SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_dynamics, mapApply((k, v) -> (k, variantType(v)), map_of_dynamics) AS map_of_dynamic_types``` SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Dynamic)') AS map_of_dynamics, mapApply((k, v) -> (k, dynamicType(v)), map_of_dynamics) AS map_of_dynamic_types
```
```text ```text
┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────┐ ┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────────────
│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │ │ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'Int64','b':'String','c':'Array(Nullable(Int64))'} │
└──────────────────────────────────┴─────────────────────────────────────────────────┘ └──────────────────────────────────┴─────────────────────────────────────────────────────────
``` ```
```sql ```sql
SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS dynamics, arrayMap(x -> (x.1, variantType(x.2)), dynamics) AS dynamic_types``` SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Dynamic') AS dynamics, arrayMap(x -> (x.1, dynamicType(x.2)), dynamics) AS dynamic_types```
``` ```
```text ```text
┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────┐ ┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────────────
│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │ │ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','Int64'),('b','String'),('c','Array(Nullable(Int64))')] │
└────────────────────────────────────────┴───────────────────────────────────────────────────────┘ └────────────────────────────────────────┴───────────────────────────────────────────────────────────────
``` ```
### Binary output format ### Binary output format

View File

@ -19,7 +19,8 @@ ClickHouse data types include:
- **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md) - **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md)
- **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md) - **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md)
- **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time - **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time
- **JSON**: the [`JSON` object](./json.md) stores a JSON document in a single column - **Object**: the [`Object`](./json.md) stores a JSON document in a single column (deprecated)
- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column
- **UUID**: a performant option for storing [`UUID` values](./uuid.md) - **UUID**: a performant option for storing [`UUID` values](./uuid.md)
- **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column - **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column
- **Arrays**: any column can be defined as an [`Array` of values](./array.md) - **Arrays**: any column can be defined as an [`Array` of values](./array.md)

View File

@ -13,7 +13,7 @@ keywords: [object, data type]
Stores JavaScript Object Notation (JSON) documents in a single column. Stores JavaScript Object Notation (JSON) documents in a single column.
`JSON` is an alias for `Object('json')`. `JSON` can be used as an alias to `Object('json')` when setting [use_json_alias_for_old_object_type](../../operations/settings/settings.md#usejsonaliasforoldobjecttype) is enabled.
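For illustration, a minimal sketch of using the alias (assuming the experimental `Object` type is enabled; the table name is illustrative):
```sql
SET allow_experimental_object_type = 1;
SET use_json_alias_for_old_object_type = 1;
-- With the setting enabled, JSON below resolves to Object('json')
CREATE TABLE old_style_json (data JSON) ENGINE = Memory;
```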
## Example ## Example
@ -79,5 +79,5 @@ SELECT * FROM json FORMAT JSONEachRow
## Related Content ## Related Content
- [Using JSON in ClickHouse](/docs/en/integrations/data-formats/json) - [Using JSON in ClickHouse](/en/integrations/data-formats/json/overview)
- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) - [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json)

View File

@ -0,0 +1,516 @@
---
slug: /en/sql-reference/data-types/newjson
sidebar_position: 63
sidebar_label: JSON
keywords: [json, data type]
---
# JSON
Stores JavaScript Object Notation (JSON) documents in a single column.
:::note
This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead.
If you want to use JSON type, set `allow_experimental_json_type = 1`.
:::
To declare a column of `JSON` type, use the following syntax:
``` sql
<column_name> JSON(max_dynamic_paths=N, max_dynamic_types=M, some.path TypeName, SKIP path.to.skip, SKIP REGEXP 'paths_regexp')
```
Where:
- `max_dynamic_paths` is an optional parameter indicating how many paths can be stored separately as subcolumns across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all other paths will be stored together in a single structure. Default value of `max_dynamic_paths` is `1024`.
- `max_dynamic_types` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a single path column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_dynamic_types` is `32`.
- `some.path TypeName` is an optional type hint for particular path in the JSON. Such paths will be always stored as subcolumns with specified type.
- `SKIP path.to.skip` is an optional hint for particular path that should be skipped during JSON parsing. Such paths will never be stored in the JSON column. If specified path is a nested JSON object, the whole nested object will be skipped.
- `SKIP REGEXP 'path_regexp'` is an optional hint with a regular expression that is used to skip paths during JSON parsing. All paths that match this regular expression will never be stored in the JSON column.
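A combined illustration of these parameters (the table, column, and path names below are hypothetical and only show the syntax):
```sql
SET allow_experimental_json_type = 1;

CREATE TABLE events
(
    data JSON(max_dynamic_paths=512, max_dynamic_types=16, user.id UInt64, SKIP debug.trace, SKIP REGEXP '^tmp\\.')
)
ENGINE = MergeTree
ORDER BY tuple();
```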
## Creating JSON
Using `JSON` type in table column definition:
```sql
CREATE TABLE test (json JSON) ENGINE = Memory;
INSERT INTO test VALUES ('{"a" : {"b" : 42}, "c" : [1, 2, 3]}'), ('{"f" : "Hello, World!"}'), ('{"a" : {"b" : 43, "e" : 10}, "c" : [4, 5, 6]}');
SELECT json FROM test;
```
```text
┌─json────────────────────────────────────────┐
│ {"a":{"b":"42"},"c":["1","2","3"]} │
│ {"f":"Hello, World!"} │
│ {"a":{"b":"43","e":"10"},"c":["4","5","6"]} │
└─────────────────────────────────────────────┘
```
```sql
CREATE TABLE test (json JSON(a.b UInt32, SKIP a.e)) ENGINE = Memory;
INSERT INTO test VALUES ('{"a" : {"b" : 42}, "c" : [1, 2, 3]}'), ('{"f" : "Hello, World!"}'), ('{"a" : {"b" : 43, "e" : 10}, "c" : [4, 5, 6]}');
SELECT json FROM test;
```
```text
┌─json──────────────────────────────┐
│ {"a":{"b":42},"c":[1,2,3]} │
│ {"a":{"b":0},"f":"Hello, World!"} │
│ {"a":{"b":43},"c":[4,5,6]} │
└───────────────────────────────────┘
```
Using CAST from 'String':
```sql
SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json;
```
```text
┌─json───────────────────────────────────────────┐
│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │
└────────────────────────────────────────────────┘
```
CAST from named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later.
## Reading JSON paths as subcolumns
JSON type supports reading every path as a separate subcolumn. If the type of the requested path was not specified in the JSON type declaration, the subcolumn of the path will always have type [Dynamic](/docs/en/sql-reference/data-types/dynamic.md).
For example:
```sql
CREATE TABLE test (json JSON(a.b UInt32, SKIP a.e)) ENGINE = Memory;
INSERT INTO test VALUES ('{"a" : {"b" : 42, "g" : 42.42}, "c" : [1, 2, 3], "d" : "2020-01-01"}'), ('{"f" : "Hello, World!", "d" : "2020-01-02"}'), ('{"a" : {"b" : 43, "e" : 10, "g" : 43.43}, "c" : [4, 5, 6]}');
SELECT json FROM test;
```
```text
┌─json──────────────────────────────────────────────────┐
│ {"a":{"b":42,"g":42.42},"c":[1,2,3],"d":"2020-01-01"} │
│ {"a":{"b":0},"d":"2020-01-02","f":"Hello, World!"} │
│ {"a":{"b":43,"g":43.43},"c":[4,5,6]} │
└───────────────────────────────────────────────────────┘
```
```sql
SELECT json.a.b, json.a.g, json.c, json.d FROM test;
```
```text
┌─json.a.b─┬─json.a.g─┬─json.c──┬─json.d─────┐
│ 42 │ 42.42 │ [1,2,3] │ 2020-01-01 │
│ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-02 │
│ 43 │ 43.43 │ [4,5,6] │ ᴺᵁᴸᴸ │
└──────────┴──────────┴─────────┴────────────┘
```
If the requested path wasn't found in the data, it will be filled with `NULL` values:
```sql
SELECT json.non.existing.path FROM test;
```
```text
┌─json.non.existing.path─┐
│ ᴺᵁᴸᴸ │
│ ᴺᵁᴸᴸ │
│ ᴺᵁᴸᴸ │
└────────────────────────┘
```
Let's check the data types of returned subcolumns:
```sql
SELECT toTypeName(json.a.b), toTypeName(json.a.g), toTypeName(json.c), toTypeName(json.d) FROM test;
```
```text
┌─toTypeName(json.a.b)─┬─toTypeName(json.a.g)─┬─toTypeName(json.c)─┬─toTypeName(json.d)─┐
│ UInt32 │ Dynamic │ Dynamic │ Dynamic │
│ UInt32 │ Dynamic │ Dynamic │ Dynamic │
│ UInt32 │ Dynamic │ Dynamic │ Dynamic │
└──────────────────────┴──────────────────────┴────────────────────┴────────────────────┘
```
As we can see, for `a.b` the type is `UInt32` as we specified in the JSON type declaration, and for all other subcolumns the type is `Dynamic`.
It is also possible to read subcolumns of a `Dynamic` type using special syntax `json.some.path.:TypeName`:
```sql
select json.a.g.:Float64, dynamicType(json.a.g), json.d.:Date, dynamicType(json.d) FROM test;
```
```text
┌─json.a.g.:`Float64`─┬─dynamicType(json.a.g)─┬─json.d.:`Date`─┬─dynamicType(json.d)─┐
│ 42.42 │ Float64 │ 2020-01-01 │ Date │
│ ᴺᵁᴸᴸ │ None │ 2020-01-02 │ Date │
│ 43.43 │ Float64 │ ᴺᵁᴸᴸ │ None │
└─────────────────────┴───────────────────────┴────────────────┴─────────────────────┘
```
`Dynamic` subcolumns can be cast to any data type. In this case an exception will be thrown if the internal type inside `Dynamic` cannot be cast to the requested type:
```sql
select json.a.g::UInt64 as uint FROM test;
```
```text
┌─uint─┐
│ 42 │
│ 0 │
│ 43 │
└──────┘
```
```sql
select json.a.g::UUID as uuid FROM test;
```
```text
Received exception:
Code: 48. DB::Exception: Conversion between numeric types and UUID is not supported. Probably the passed UUID is unquoted: while executing 'FUNCTION CAST(__table1.json.a.g :: 2, 'UUID'_String :: 1) -> CAST(__table1.json.a.g, 'UUID'_String) UUID : 0'. (NOT_IMPLEMENTED)
```
## Reading JSON sub-objects as subcolumns
JSON type supports reading nested objects as subcolumns with type `JSON` using special syntax `json.^some.path`:
```sql
CREATE TABLE test (json JSON) ENGINE = Memory;
INSERT INTO test VALUES ('{"a" : {"b" : {"c" : 42, "g" : 42.42}}, "c" : [1, 2, 3], "d" : {"e" : {"f" : {"g" : "Hello, World", "h" : [1, 2, 3]}}}}'), ('{"f" : "Hello, World!", "d" : {"e" : {"f" : {"h" : [4, 5, 6]}}}}'), ('{"a" : {"b" : {"c" : 43, "e" : 10, "g" : 43.43}}, "c" : [4, 5, 6]}');
SELECT json FROM test;
```
```text
┌─json────────────────────────────────────────────────────────────────────────────────────────┐
│ {"a":{"b":{"c":42,"g":42.42}},"c":[1,2,3],"d":{"e":{"f":{"g":"Hello, World","h":[1,2,3]}}}} │
│ {"d":{"e":{"f":{"h":[4,5,6]}}},"f":"Hello, World!"} │
│ {"a":{"b":{"c":43,"e":10,"g":43.43}},"c":[4,5,6]} │
└─────────────────────────────────────────────────────────────────────────────────────────────┘
```
```sql
SELECT json.^a.b, json.^d.e.f FROM test;
```
```text
┌─json.^`a`.b───────────────┬─json.^`d`.e.f────────────────────┐
│ {"c":42,"g":42.42} │ {"g":"Hello, World","h":[1,2,3]} │
│ {} │ {"h":[4,5,6]} │
│ {"c":43,"e":10,"g":43.43} │ {} │
└───────────────────────────┴──────────────────────────────────┘
```
:::note
Reading sub-objects as subcolumns may be inefficient, as this may require almost full scan of the JSON data.
:::
## Types inference for paths
During JSON parsing ClickHouse tries to detect the most appropriate data type for each JSON path. It works similarly to [automatic schema inference from input data](/docs/en/interfaces/schema-inference.md) and is controlled by the same settings:
- [input_format_try_infer_integers](/docs/en/interfaces/schema-inference.md#inputformattryinferintegers)
- [input_format_try_infer_dates](/docs/en/interfaces/schema-inference.md#inputformattryinferdates)
- [input_format_try_infer_datetimes](/docs/en/interfaces/schema-inference.md#inputformattryinferdatetimes)
- [schema_inference_make_columns_nullable](/docs/en/interfaces/schema-inference.md#schemainferencemakecolumnsnullable)
- [input_format_json_try_infer_numbers_from_strings](/docs/en/interfaces/schema-inference.md#inputformatjsontryinfernumbersfromstrings)
- [input_format_json_infer_incomplete_types_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsoninferincompletetypesasstrings)
- [input_format_json_read_numbers_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadnumbersasstrings)
- [input_format_json_read_bools_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadboolsasstrings)
- [input_format_json_read_bools_as_numbers](/docs/en/interfaces/schema-inference.md#inputformatjsonreadboolsasnumbers)
- [input_format_json_read_arrays_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadarraysasstrings)
Let's see some examples:
```sql
SELECT JSONAllPathsWithTypes('{"a" : "2020-01-01", "b" : "2020-01-01 10:00:00"}'::JSON) AS paths_with_types settings input_format_try_infer_dates=1, input_format_try_infer_datetimes=1;
```
```text
┌─paths_with_types─────────────────┐
│ {'a':'Date','b':'DateTime64(9)'} │
└──────────────────────────────────┘
```
```sql
SELECT JSONAllPathsWithTypes('{"a" : "2020-01-01", "b" : "2020-01-01 10:00:00"}'::JSON) AS paths_with_types settings input_format_try_infer_dates=0, input_format_try_infer_datetimes=0;
```
```text
┌─paths_with_types────────────┐
│ {'a':'String','b':'String'} │
└─────────────────────────────┘
```
```sql
SELECT JSONAllPathsWithTypes('{"a" : [1, 2, 3]}'::JSON) AS paths_with_types settings schema_inference_make_columns_nullable=1;
```
```text
┌─paths_with_types───────────────┐
│ {'a':'Array(Nullable(Int64))'} │
└────────────────────────────────┘
```
```sql
SELECT JSONAllPathsWithTypes('{"a" : [1, 2, 3]}'::JSON) AS paths_with_types settings schema_inference_make_columns_nullable=0;
```
```text
┌─paths_with_types─────┐
│ {'a':'Array(Int64)'} │
└──────────────────────┘
```
## Handling arrays of JSON objects
JSON paths that contain an array of objects are parsed as type `Array(JSON)` and inserted into the `Dynamic` column for this path. To read an array of objects, you can extract it from the `Dynamic` column as a subcolumn:
```sql
CREATE TABLE test (json JSON) ENGINE = Memory;
INSERT INTO test VALUES
('{"a" : {"b" : [{"c" : 42, "d" : "Hello", "f" : [[{"g" : 42.42}]], "k" : {"j" : 1000}}, {"c" : 43}, {"e" : [1, 2, 3], "d" : "My", "f" : [[{"g" : 43.43, "h" : "2020-01-01"}]], "k" : {"j" : 2000}}]}}'),
('{"a" : {"b" : [1, 2, 3]}}'),
('{"a" : {"b" : [{"c" : 44, "f" : [[{"h" : "2020-01-02"}]]}, {"e" : [4, 5, 6], "d" : "World", "f" : [[{"g" : 44.44}]], "k" : {"j" : 3000}}]}}');
SELECT json FROM test;
```
```text
┌─json────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ {"a":{"b":[{"c":"42","d":"Hello","f":[[{"g":42.42}]],"k":{"j":"1000"}},{"c":"43"},{"d":"My","e":["1","2","3"],"f":[[{"g":43.43,"h":"2020-01-01"}]],"k":{"j":"2000"}}]}} │
│ {"a":{"b":["1","2","3"]}} │
│ {"a":{"b":[{"c":"44","f":[[{"h":"2020-01-02"}]]},{"d":"World","e":["4","5","6"],"f":[[{"g":44.44}]],"k":{"j":"3000"}}]}} │
└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
```sql
SELECT json.a.b, dynamicType(json.a.b) FROM test;
```
```text
┌─json.a.b──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─dynamicType(json.a.b)────────────────────────────────────┐
│ ['{"c":"42","d":"Hello","f":[[{"g":42.42}]],"k":{"j":"1000"}}','{"c":"43"}','{"d":"My","e":["1","2","3"],"f":[[{"g":43.43,"h":"2020-01-01"}]],"k":{"j":"2000"}}'] │ Array(JSON(max_dynamic_types=16, max_dynamic_paths=256)) │
│ [1,2,3] │ Array(Nullable(Int64)) │
│ ['{"c":"44","f":[[{"h":"2020-01-02"}]]}','{"d":"World","e":["4","5","6"],"f":[[{"g":44.44}]],"k":{"j":"3000"}}'] │ Array(JSON(max_dynamic_types=16, max_dynamic_paths=256)) │
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────────────────────────────────┘
```
As you can see, the `max_dynamic_types`/`max_dynamic_paths` parameters of the nested `JSON` type were reduced compared to the default values. This is needed to prevent the number of subcolumns from growing uncontrollably in nested arrays of JSON objects.
Let's try to read subcolumns from this nested `JSON` column:
```sql
SELECT json.a.b.:`Array(JSON)`.c, json.a.b.:`Array(JSON)`.f, json.a.b.:`Array(JSON)`.d FROM test;
```
```text
┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f───────────────────────────────────┬─json.a.b.:`Array(JSON)`.d─┐
│ [42,43,NULL] │ [[['{"g":42.42}']],NULL,[['{"g":43.43,"h":"2020-01-01"}']]] │ ['Hello',NULL,'My'] │
│ [] │ [] │ [] │
│ [44,NULL] │ [[['{"h":"2020-01-02"}']],[['{"g":44.44}']]] │ [NULL,'World'] │
└───────────────────────────┴─────────────────────────────────────────────────────────────┴───────────────────────────┘
```
We can avoid writing the `Array(JSON)` subcolumn name by using special syntax:
```sql
SELECT json.a.b[].c, json.a.b[].f, json.a.b[].d FROM test;
```
```text
┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f───────────────────────────────────┬─json.a.b.:`Array(JSON)`.d─┐
│ [42,43,NULL] │ [[['{"g":42.42}']],NULL,[['{"g":43.43,"h":"2020-01-01"}']]] │ ['Hello',NULL,'My'] │
│ [] │ [] │ [] │
│ [44,NULL] │ [[['{"h":"2020-01-02"}']],[['{"g":44.44}']]] │ [NULL,'World'] │
└───────────────────────────┴─────────────────────────────────────────────────────────────┴───────────────────────────┘
```
The number of `[]` after the path indicates the array level: `json.path[][]` will be transformed to `json.path.:Array(Array(JSON))`.
Let's check the paths and types inside our `Array(JSON)`:
```sql
SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b[]))) FROM test;
```
```text
┌─arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b.:`Array(JSON)`)))──┐
│ ('c','Int64') │
│ ('d','String') │
│ ('f','Array(Array(JSON(max_dynamic_types=8, max_dynamic_paths=64)))') │
│ ('k.j','Int64') │
│ ('e','Array(Nullable(Int64))') │
└───────────────────────────────────────────────────────────────────────┘
```
Let's read subcolumns from `Array(JSON)` column:
```sql
SELECT json.a.b[].c.:Int64, json.a.b[].f[][].g.:Float64, json.a.b[].f[][].h.:Date FROM test;
```
```text
┌─json.a.b.:`Array(JSON)`.c.:`Int64`─┬─json.a.b.:`Array(JSON)`.f.:`Array(Array(JSON))`.g.:`Float64`─┬─json.a.b.:`Array(JSON)`.f.:`Array(Array(JSON))`.h.:`Date`─┐
│ [42,43,NULL] │ [[[42.42]],[],[[43.43]]] │ [[[NULL]],[],[['2020-01-01']]] │
│ [] │ [] │ [] │
│ [44,NULL] │ [[[NULL]],[[44.44]]] │ [[['2020-01-02']],[[NULL]]] │
└────────────────────────────────────┴──────────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘
```
We can also read sub-object subcolumns from nested `JSON` column:
```sql
SELECT json.a.b[].^k FROM test
```
```text
┌─json.a.b.:`Array(JSON)`.^`k`─────────┐
│ ['{"j":"1000"}','{}','{"j":"2000"}'] │
│ [] │
│ ['{}','{"j":"3000"}'] │
└──────────────────────────────────────┘
```
## Reading JSON type from the data
All text formats (JSONEachRow, TSV, CSV, CustomSeparated, Values, etc.) support reading the `JSON` type.
Examples:
```sql
SELECT json FROM format(JSONEachRow, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP d.e, SKIP REGEXP \'b.*\')', '
{"json" : {"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}}}
{"json" : {"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}}}
{"json" : {"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"}}
{"json" : {"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43}}
{"json" : {"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : {"c" : 11, "j" : [1, 2, 3]}, "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44}, "h" : "2020-02-02 10:00:00"}}}
')
```
```text
┌─json──────────────────────────────────────────────────────────┐
│ {"a":{"b":{"c":1}},"c":"42","d":{"i":["1","2","3"]}} │
│ {"a":{"b":{"c":2}},"d":{"i":["4","5","6"]}} │
│ {"a":{"b":{"c":3}},"e":"Hello, World!"} │
│ {"a":{"b":{"c":4}},"c":"43"} │
│ {"a":{"b":{"c":5}},"d":{"h":"2020-02-02 10:00:00.000000000"}} │
└───────────────────────────────────────────────────────────────┘
```
For text formats like CSV/TSV/etc., `JSON` is parsed from a string containing a JSON object:
```sql
SELECT json FROM format(TSV, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP REGEXP \'b.*\')',
'{"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}}
{"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}}
{"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"}
{"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43}
{"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : {"c" : 11, "j" : [1, 2, 3]}, "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44}, "h" : "2020-02-02 10:00:00"}}')
```
```text
┌─json──────────────────────────────────────────────────────────┐
│ {"a":{"b":{"c":1}},"c":"42","d":{"i":["1","2","3"]}} │
│ {"a":{"b":{"c":2}},"d":{"i":["4","5","6"]}} │
│ {"a":{"b":{"c":3}},"e":"Hello, World!"} │
│ {"a":{"b":{"c":4}},"c":"43"} │
│ {"a":{"b":{"c":5}},"d":{"h":"2020-02-02 10:00:00.000000000"}} │
└───────────────────────────────────────────────────────────────┘
```
## Reaching the limit of dynamic paths inside JSON
The `JSON` data type can store only a limited number of paths as separate subcolumns. By default, this limit is 1024, but you can change it in the type declaration using the `max_dynamic_paths` parameter.
When the limit is reached, all new paths inserted into a `JSON` column will be stored in a single shared data structure. It's still possible to read such paths as subcolumns, but doing so requires reading the whole shared data structure to extract the values of the path. This limit is needed to avoid an enormous number of different subcolumns that can make the table unusable.
Let's see what happens when the limit is reached in different scenarios.
### Reaching the limit during data parsing
During parsing of `JSON` objects from the data, when the limit is reached for the current block of data, all new paths will be stored in a shared data structure. We can check this using the introspection functions `JSONDynamicPaths` and `JSONSharedDataPaths`:
```sql
SELECT json, JSONDynamicPaths(json), JSONSharedDataPaths(json) FROM format(JSONEachRow, 'json JSON(max_dynamic_paths=3)', '
{"json" : {"a" : {"b" : 42}, "c" : [1, 2, 3]}}
{"json" : {"a" : {"b" : 43}, "d" : "2020-01-01"}}
{"json" : {"a" : {"b" : 44}, "c" : [4, 5, 6]}}
{"json" : {"a" : {"b" : 43}, "d" : "2020-01-02", "e" : "Hello", "f" : {"g" : 42.42}}}
{"json" : {"a" : {"b" : 43}, "c" : [7, 8, 9], "f" : {"g" : 43.43}, "h" : "World"}}
')
```
```text
┌─json───────────────────────────────────────────────────────────┬─JSONDynamicPaths(json)─┬─JSONSharedDataPaths(json)─┐
│ {"a":{"b":"42"},"c":["1","2","3"]} │ ['a.b','c','d'] │ [] │
│ {"a":{"b":"43"},"d":"2020-01-01"} │ ['a.b','c','d'] │ [] │
│ {"a":{"b":"44"},"c":["4","5","6"]} │ ['a.b','c','d'] │ [] │
│ {"a":{"b":"43"},"d":"2020-01-02","e":"Hello","f":{"g":42.42}} │ ['a.b','c','d'] │ ['e','f.g'] │
│ {"a":{"b":"43"},"c":["7","8","9"],"f":{"g":43.43},"h":"World"} │ ['a.b','c','d'] │ ['f.g','h'] │
└────────────────────────────────────────────────────────────────┴────────────────────────┴───────────────────────────┘
```
As we can see, once the limit was reached, the new paths `e` and `f.g` were inserted into the shared data structure.
### During merges of data parts in MergeTree table engines
During a merge of several data parts in a MergeTree table, the `JSON` column in the resulting data part can reach the limit of dynamic paths and won't be able to store all paths from the source parts as subcolumns.
In this case ClickHouse chooses which paths will remain as subcolumns after the merge and which paths will be stored in the shared data structure. In most cases ClickHouse tries to keep the paths that contain the largest number of non-null values and move the rarest paths to the shared data structure, but it depends on the implementation.
Let's see an example of such merge. First, let's create a table with `JSON` column, set the limit of dynamic paths to `3` and insert values with `5` different paths:
```sql
CREATE TABLE test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree ORDER BY id;
SYSTEM STOP MERGES test;
INSERT INTO test SELECT number, formatRow('JSONEachRow', number as a) FROM numbers(5);
INSERT INTO test SELECT number, formatRow('JSONEachRow', number as b) FROM numbers(4);
INSERT INTO test SELECT number, formatRow('JSONEachRow', number as c) FROM numbers(3);
INSERT INTO test SELECT number, formatRow('JSONEachRow', number as d) FROM numbers(2);
INSERT INTO test SELECT number, formatRow('JSONEachRow', number as e) FROM numbers(1);
```
Each insert will create a separate data part with a `JSON` column containing a single path:
```sql
SELECT count(), JSONDynamicPaths(json) AS dynamic_paths, JSONSharedDataPaths(json) AS shared_data_paths, _part FROM test GROUP BY _part, dynamic_paths, shared_data_paths ORDER BY _part ASC
```
```text
┌─count()─┬─dynamic_paths─┬─shared_data_paths─┬─_part─────┐
│ 5 │ ['a'] │ [] │ all_1_1_0 │
│ 4 │ ['b'] │ [] │ all_2_2_0 │
│ 3 │ ['c'] │ [] │ all_3_3_0 │
│ 2 │ ['d'] │ [] │ all_4_4_0 │
│ 1 │ ['e'] │ [] │ all_5_5_0 │
└─────────┴───────────────┴───────────────────┴───────────┘
```
Now, let's merge all parts into one and see what will happen:
```sql
SYSTEM START MERGES test;
OPTIMIZE TABLE test FINAL;
SELECT count(), JSONDynamicPaths(json) AS dynamic_paths, JSONSharedDataPaths(json) AS shared_data_paths, _part FROM test GROUP BY _part, dynamic_paths, shared_data_paths ORDER BY _part;
```
```text
┌─count()─┬─dynamic_paths─┬─shared_data_paths─┬─_part─────┐
│ 1 │ ['a','b','c'] │ ['e'] │ all_1_5_2 │
│ 2 │ ['a','b','c'] │ ['d'] │ all_1_5_2 │
│ 12 │ ['a','b','c'] │ [] │ all_1_5_2 │
└─────────┴───────────────┴───────────────────┴───────────┘
```
As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and moved paths `d` and `e` to the shared data structure.
## Introspection functions
There are several functions that can help to inspect the content of the JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes).
## Tips for better usage of the JSON type
Before creating `JSON` column and loading data into it, consider the following tips:
- Investigate your data and specify as many path hints with types as you can. It will make storage and reading much more efficient.
- Think about which paths you will need and which paths you will never need. Specify paths that you won't need in the SKIP section, and in SKIP REGEXP if needed. It will improve the storage.
- Don't set the `max_dynamic_paths` parameter to very high values; it can make storage and reading less efficient.
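As a sketch of the first tip (the sample table name here is illustrative), you can load a representative sample of your data into a plain `JSON` column and use the introspection functions above to see which paths and types actually occur, then choose type hints and `SKIP` clauses accordingly:
```sql
CREATE TABLE json_sample (json JSON) ENGINE = Memory;
-- INSERT a representative sample of the data here, then:
SELECT arrayJoin(JSONAllPathsWithTypes(json)) AS path_and_type, count()
FROM json_sample
GROUP BY path_and_type
ORDER BY count() DESC;
```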

View File

@ -1155,3 +1155,207 @@ SELECT jsonMergePatch('{"a":1}', '{"name": "joey"}', '{"name": "tom"}', '{"name"
│ {"a":1,"name":"zoey"} │ │ {"a":1,"name":"zoey"} │
└───────────────────────┘ └───────────────────────┘
``` ```
### JSONAllPaths
Returns the list of all paths stored in each row in [JSON](../data-types/newjson.md) column.
**Syntax**
``` sql
JSONAllPaths(json)
```
**Arguments**
- `json` — [JSON](../data-types/newjson.md).
**Returned value**
- An array of paths. [Array(String)](../data-types/array.md).
**Example**
``` sql
CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory;
INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}}
SELECT json, JSONAllPaths(json) FROM test;
```
```text
┌─json─────────────────────────────────┬─JSONAllPaths(json)─┐
│ {"a":"42"} │ ['a'] │
│ {"b":"Hello"} │ ['b'] │
│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['a','c'] │
└──────────────────────────────────────┴────────────────────┘
```
### JSONAllPathsWithTypes
Returns the map of all paths and their data types stored in each row in [JSON](../data-types/newjson.md) column.
**Syntax**
``` sql
JSONAllPathsWithTypes(json)
```
**Arguments**
- `json` — [JSON](../data-types/newjson.md).
**Returned value**
- A map of paths and their data types. [Map(String, String)](../data-types/map.md).
**Example**
``` sql
CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory;
INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}}
SELECT json, JSONAllPathsWithTypes(json) FROM test;
```
```text
┌─json─────────────────────────────────┬─JSONAllPathsWithTypes(json)───────────────┐
│ {"a":"42"} │ {'a':'Int64'} │
│ {"b":"Hello"} │ {'b':'String'} │
│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))','c':'Date'} │
└──────────────────────────────────────┴───────────────────────────────────────────┘
```
### JSONDynamicPaths
Returns the list of dynamic paths that are stored as separate subcolumns in [JSON](../data-types/newjson.md) column.
**Syntax**
``` sql
JSONDynamicPaths(json)
```
**Arguments**
- `json` — [JSON](../data-types/newjson.md).
**Returned value**
- An array of paths. [Array(String)](../data-types/array.md).
**Example**
``` sql
CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory;
INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}}
SELECT json, JSONDynamicPaths(json) FROM test;
```
```text
┌─json─────────────────────────────────┬─JSONDynamicPaths(json)─┐
| {"a":"42"} │ ['a'] │
│ {"b":"Hello"} │ [] │
│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['a'] │
└──────────────────────────────────────┴────────────────────────┘
```
### JSONDynamicPathsWithTypes
Returns the map of dynamic paths that are stored as separate subcolumns and their types in each row in [JSON](../data-types/newjson.md) column.
**Syntax**
``` sql
JSONDynamicPathsWithTypes(json)
```
**Arguments**
- `json` — [JSON](../data-types/newjson.md).
**Returned value**
- A map of dynamic paths and their data types. [Map(String, String)](../data-types/map.md).
**Example**
``` sql
CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory;
INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}}
SELECT json, JSONDynamicPathsWithTypes(json) FROM test;
```
```text
┌─json─────────────────────────────────┬─JSONDynamicPathsWithTypes(json)─┐
│ {"a":"42"} │ {'a':'Int64'} │
│ {"b":"Hello"} │ {} │
│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))'} │
└──────────────────────────────────────┴─────────────────────────────────┘
```
### JSONSharedDataPaths
Returns the list of paths that are stored in shared data structure in [JSON](../data-types/newjson.md) column.
**Syntax**
``` sql
JSONSharedDataPaths(json)
```
**Arguments**
- `json` — [JSON](../data-types/newjson.md).
**Returned value**
- An array of paths. [Array(String)](../data-types/array.md).
**Example**
``` sql
CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory;
INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}}
SELECT json, JSONSharedDataPaths(json) FROM test;
```
```text
┌─json─────────────────────────────────┬─JSONSharedDataPaths(json)─┐
│ {"a":"42"} │ [] │
│ {"b":"Hello"} │ ['b'] │
│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['c'] │
└──────────────────────────────────────┴───────────────────────────┘
```
### JSONSharedDataPathsWithTypes
Returns the map of paths that are stored in shared data structure and their types in each row in [JSON](../data-types/newjson.md) column.
**Syntax**
``` sql
JSONSharedDataPathsWithTypes(json)
```
**Arguments**
- `json` — [JSON](../data-types/newjson.md).
**Returned value**
- A map of paths stored in the shared data structure and their data types. [Map(String, String)](../data-types/map.md).
**Example**
``` sql
CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory;
INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}}
SELECT json, JSONSharedDataPathsWithTypes(json) FROM test;
```
```text
┌─json─────────────────────────────────┬─JSONSharedDataPathsWithTypes(json)─┐
│ {"a":"42"} │ {} │
│ {"b":"Hello"} │ {'b':'String'} │
│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'c':'Date'} │
└──────────────────────────────────────┴────────────────────────────────────┘
```

View File

@ -6103,30 +6103,23 @@ Result:
└───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘ └───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘
``` ```
## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) ## toIntervalYear
Converts a Number type argument to an [Interval](../data-types/special-data-types/interval.md) data type. Returns an interval of `n` years of data type [IntervalYear](../data-types/special-data-types/interval.md).
**Syntax** **Syntax**
``` sql ``` sql
toIntervalSecond(number) toIntervalYear(n)
toIntervalMinute(number)
toIntervalHour(number)
toIntervalDay(number)
toIntervalWeek(number)
toIntervalMonth(number)
toIntervalQuarter(number)
toIntervalYear(number)
``` ```
**Arguments** **Arguments**
- `number` — Duration of interval. Positive integer number. - `n` — Number of years. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values** **Returned values**
- The value in `Interval` data type. - Interval of `n` years. [IntervalYear](../data-types/special-data-types/interval.md).
**Example** **Example**
@ -6134,20 +6127,387 @@ Query:
``` sql ``` sql
WITH WITH
toDate('2019-01-01') AS date, toDate('2024-06-15') AS date,
INTERVAL 1 WEEK AS interval_week, toIntervalYear(1) AS interval_to_year
toIntervalWeek(1) AS interval_to_week SELECT date + interval_to_year AS result
SELECT
date + interval_week,
date + interval_to_week;
``` ```
Result: Result:
```response ```response
┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐ ┌─────result─┐
│ 2019-01-08 │ 2019-01-08 │ │ 2025-06-15 │
└───────────────────────────┴──────────────────────────────┘ └────────────┘
```
## toIntervalQuarter
Returns an interval of `n` quarters of data type [IntervalQuarter](../data-types/special-data-types/interval.md).
**Syntax**
``` sql
toIntervalQuarter(n)
```
**Arguments**
- `n` — Number of quarters. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values**
- Interval of `n` quarters. [IntervalQuarter](../data-types/special-data-types/interval.md).
**Example**
Query:
``` sql
WITH
toDate('2024-06-15') AS date,
toIntervalQuarter(1) AS interval_to_quarter
SELECT date + interval_to_quarter AS result
```
Result:
```response
┌─────result─┐
│ 2024-09-15 │
└────────────┘
```
## toIntervalMonth
Returns an interval of `n` months of data type [IntervalMonth](../data-types/special-data-types/interval.md).
**Syntax**
``` sql
toIntervalMonth(n)
```
**Arguments**
- `n` — Number of months. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values**
- Interval of `n` months. [IntervalMonth](../data-types/special-data-types/interval.md).
**Example**
Query:
``` sql
WITH
toDate('2024-06-15') AS date,
toIntervalMonth(1) AS interval_to_month
SELECT date + interval_to_month AS result
```
Result:
```response
┌─────result─┐
│ 2024-07-15 │
└────────────┘
```
## toIntervalWeek
Returns an interval of `n` weeks of data type [IntervalWeek](../data-types/special-data-types/interval.md).
**Syntax**
``` sql
toIntervalWeek(n)
```
**Arguments**
- `n` — Number of weeks. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values**
- Interval of `n` weeks. [IntervalWeek](../data-types/special-data-types/interval.md).
**Example**
Query:
``` sql
WITH
toDate('2024-06-15') AS date,
toIntervalWeek(1) AS interval_to_week
SELECT date + interval_to_week AS result
```
Result:
```response
┌─────result─┐
│ 2024-06-22 │
└────────────┘
```
## toIntervalDay
Returns an interval of `n` days of data type [IntervalDay](../data-types/special-data-types/interval.md).
**Syntax**
``` sql
toIntervalDay(n)
```
**Arguments**
- `n` — Number of days. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values**
- Interval of `n` days. [IntervalDay](../data-types/special-data-types/interval.md).
**Example**
Query:
``` sql
WITH
toDate('2024-06-15') AS date,
toIntervalDay(5) AS interval_to_days
SELECT date + interval_to_days AS result
```
Result:
```response
┌─────result─┐
│ 2024-06-20 │
└────────────┘
```
## toIntervalHour
Returns an interval of `n` hours of data type [IntervalHour](../data-types/special-data-types/interval.md).
**Syntax**
``` sql
toIntervalHour(n)
```
**Arguments**
- `n` — Number of hours. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values**
- Interval of `n` hours. [IntervalHour](../data-types/special-data-types/interval.md).
**Example**
Query:
``` sql
WITH
toDate('2024-06-15') AS date,
toIntervalHour(12) AS interval_to_hours
SELECT date + interval_to_hours AS result
```
Result:
```response
┌──────────────result─┐
│ 2024-06-15 12:00:00 │
└─────────────────────┘
```
## toIntervalMinute
Returns an interval of `n` minutes of data type [IntervalMinute](../data-types/special-data-types/interval.md).
**Syntax**
``` sql
toIntervalMinute(n)
```
**Arguments**
- `n` — Number of minutes. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values**
- Interval of `n` minutes. [IntervalMinute](../data-types/special-data-types/interval.md).
**Example**
Query:
``` sql
WITH
toDate('2024-06-15') AS date,
toIntervalMinute(12) AS interval_to_minutes
SELECT date + interval_to_minutes AS result
```
Result:
```response
┌──────────────result─┐
│ 2024-06-15 00:12:00 │
└─────────────────────┘
```
## toIntervalSecond
Returns an interval of `n` seconds of data type [IntervalSecond](../data-types/special-data-types/interval.md).
**Syntax**
``` sql
toIntervalSecond(n)
```
**Arguments**
- `n` — Number of seconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values**
- Interval of `n` seconds. [IntervalSecond](../data-types/special-data-types/interval.md).
**Example**
Query:
``` sql
WITH
toDate('2024-06-15') AS date,
toIntervalSecond(30) AS interval_to_seconds
SELECT date + interval_to_seconds AS result
```
Result:
```response
┌──────────────result─┐
│ 2024-06-15 00:00:30 │
└─────────────────────┘
```
## toIntervalMillisecond
Returns an interval of `n` milliseconds of data type [IntervalMillisecond](../data-types/special-data-types/interval.md).
**Syntax**
``` sql
toIntervalMillisecond(n)
```
**Arguments**
- `n` — Number of milliseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values**
- Interval of `n` milliseconds. [IntervalMillisecond](../data-types/special-data-types/interval.md).
**Example**
Query:
``` sql
WITH
toDateTime('2024-06-15') AS date,
toIntervalMillisecond(30) AS interval_to_milliseconds
SELECT date + interval_to_milliseconds AS result
```
Result:
```response
┌──────────────────result─┐
│ 2024-06-15 00:00:00.030 │
└─────────────────────────┘
```
## toIntervalMicrosecond
Returns an interval of `n` microseconds of data type [IntervalMicrosecond](../data-types/special-data-types/interval.md).
**Syntax**
``` sql
toIntervalMicrosecond(n)
```
**Arguments**
- `n` — Number of microseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values**
- Interval of `n` microseconds. [IntervalMicrosecond](../data-types/special-data-types/interval.md).
**Example**
Query:
``` sql
WITH
toDateTime('2024-06-15') AS date,
toIntervalMicrosecond(30) AS interval_to_microseconds
SELECT date + interval_to_microseconds AS result
```
Result:
```response
┌─────────────────────result─┐
│ 2024-06-15 00:00:00.000030 │
└────────────────────────────┘
```
## toIntervalNanosecond
Returns an interval of `n` nanoseconds of data type [IntervalNanosecond](../data-types/special-data-types/interval.md).
**Syntax**
``` sql
toIntervalNanosecond(n)
```
**Arguments**
- `n` — Number of nanoseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
**Returned values**
- Interval of `n` nanoseconds. [IntervalNanosecond](../data-types/special-data-types/interval.md).
**Example**
Query:
``` sql
WITH
toDateTime('2024-06-15') AS date,
toIntervalNanosecond(30) AS interval_to_nanoseconds
SELECT date + interval_to_nanoseconds AS result
```
Result:
```response
┌────────────────────────result─┐
│ 2024-06-15 00:00:00.000000030 │
└───────────────────────────────┘
```
## parseDateTime
@ -13,8 +13,8 @@ Creates a new view. Views can be [normal](#normal-view), [materialized](#materia
Syntax:
``` sql
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
[COMMENT 'comment']
```
@ -55,8 +55,8 @@ SELECT * FROM view(column1=value1, column2=value2 ...)
## Materialized View
``` sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
[COMMENT 'comment']
```
@ -92,7 +92,7 @@ Given that `POPULATE` works like `CREATE TABLE ... AS SELECT ...` it has limitat
- It is not supported with Replicated database
- It is not supported in ClickHouse cloud
Instead a separate `INSERT ... SELECT` can be used.
:::
A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`. Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won't be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`.
@ -110,7 +110,7 @@ To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop
`DEFINER` and `SQL SECURITY` allow you to specify which ClickHouse user to use when executing the view's underlying query.
`SQL SECURITY` has three legal values: `DEFINER`, `INVOKER`, or `NONE`. You can specify any existing user or `CURRENT_USER` in the `DEFINER` clause.
The following table explains which rights are required for each user in order to select from a view.
Note that regardless of the SQL security option, in every case it is still required to have `GRANT SELECT ON <view>` in order to read from it.
| SQL security option | View | Materialized View |
@ -130,7 +130,7 @@ If `DEFINER`/`SQL SECURITY` aren't specified, the default values are used:
If a view is attached without `DEFINER`/`SQL SECURITY` specified, the default value is `SQL SECURITY NONE` for the materialized view and `SQL SECURITY INVOKER` for the normal view.
To change SQL security for an existing view, use
```sql
ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
```
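For example, a hedged illustration (the view name `sales_view` is hypothetical) that switches an existing view to run its query with the permissions of the querying user:
```sql
ALTER TABLE sales_view MODIFY SQL SECURITY INVOKER
```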
@ -161,6 +161,8 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name
REFRESH EVERY|AFTER interval [OFFSET interval]
RANDOMIZE FOR interval
DEPENDS ON [db.]name [, [db.]name [, ...]]
SETTINGS name = value [, name = value [, ...]]
[APPEND]
[TO[db.]name] [(columns)] [ENGINE = engine] [EMPTY]
AS SELECT ...
[COMMENT 'comment']
@ -170,18 +172,23 @@ where `interval` is a sequence of simple intervals:
number SECOND|MINUTE|HOUR|DAY|WEEK|MONTH|YEAR
```
Periodically runs the corresponding query and stores its result in a table.
* If the query says `APPEND`, each refresh inserts rows into the table without deleting existing rows. The insert is not atomic, just like a regular INSERT SELECT (see the sketch below).
* Otherwise each refresh atomically replaces the table's previous contents.
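A minimal sketch of an `APPEND` refresh, assuming the experimental setting is enabled and that a source table `events` exists (all names here are illustrative):
``` sql
SET allow_experimental_refreshable_materialized_view = 1;

CREATE MATERIALIZED VIEW events_hourly
REFRESH EVERY 1 HOUR APPEND
ENGINE = MergeTree ORDER BY snapshot_time
AS SELECT now() AS snapshot_time, count() AS events FROM events;
```
Each refresh appends one new row per hour instead of replacing the previous contents.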
Differences from regular non-refreshable materialized views:
* No insert trigger. I.e. when new data is inserted into the table specified in SELECT, it's *not* automatically pushed to the refreshable materialized view. The periodic refresh runs the entire query.
* No restrictions on the SELECT query. Table functions (e.g. `url()`), views, UNION, JOIN, are all allowed.
:::note
The settings in the `REFRESH ... SETTINGS` part of the query are refresh settings (e.g. `refresh_retries`), distinct from regular settings (e.g. `max_threads`). Regular settings can be specified using `SETTINGS` at the end of the query.
:::
:::note
Refreshable materialized views are a work in progress. Setting `allow_experimental_refreshable_materialized_view = 1` is required for creating one. Current limitations:
* not compatible with Replicated database or table engines
* It is not supported in ClickHouse Cloud
* require [Atomic database engine](../../../engines/database-engines/atomic.md),
* no limit on number of concurrent refreshes.
:::
@ -246,15 +253,22 @@ A few more examples:
`DEPENDS ON` only works between refreshable materialized views. Listing a regular table in the `DEPENDS ON` list will prevent the view from ever refreshing (dependencies can be removed with `ALTER`, see below).
:::
### Settings
Available refresh settings:
* `refresh_retries` - How many times to retry if refresh query fails with an exception. If all retries fail, skip to the next scheduled refresh time. 0 means no retries, -1 means infinite retries. Default: 0.
* `refresh_retry_initial_backoff_ms` - Delay before the first retry, if `refresh_retries` is not zero. Each subsequent retry doubles the delay, up to `refresh_retry_max_backoff_ms`. Default: 100 ms.
* `refresh_retry_max_backoff_ms` - Limit on the exponential growth of delay between refresh attempts. Default: 60000 ms (1 minute).
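As an illustration only (names are hypothetical), a refresh setting and a regular query setting can be combined like this:
``` sql
CREATE MATERIALIZED VIEW daily_summary
REFRESH EVERY 1 DAY SETTINGS refresh_retries = 3
ENGINE = MergeTree ORDER BY day
AS SELECT toDate(ts) AS day, count() AS hits FROM events GROUP BY day
SETTINGS max_threads = 4;
```
Here `refresh_retries` is a refresh setting attached to the schedule, while `max_threads` is a regular setting applied to the refresh query itself.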
### Changing Refresh Parameters {#changing-refresh-parameters}
To change refresh parameters:
```
ALTER TABLE [db.]name MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE FOR ...] [DEPENDS ON ...] [SETTINGS ...]
```
:::note
This replaces *all* refresh parameters at once: schedule, dependencies, settings, and APPEND-ness. E.g. if the table had a `DEPENDS ON`, doing a `MODIFY REFRESH` without `DEPENDS ON` will remove the dependencies.
:::
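For illustration (the view name is hypothetical), changing the schedule and retry behaviour in one statement, which also clears any previously configured `DEPENDS ON` list:
```
ALTER TABLE daily_summary MODIFY REFRESH EVERY 1 DAY OFFSET 2 HOUR SETTINGS refresh_retries = 2
```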
### Other operations
@ -263,6 +277,10 @@ The status of all refreshable materialized views is available in table [`system.
To manually stop, start, trigger, or cancel refreshes use [`SYSTEM STOP|START|REFRESH|CANCEL VIEW`](../system.md#refreshable-materialized-views).
:::note
Fun fact: the refresh query is allowed to read from the view that's being refreshed, seeing pre-refresh version of the data. This means you can implement Conway's game of life: https://pastila.nl/?00021a4b/d6156ff819c83d490ad2dcec05676865#O0LGWTO7maUQIA4AcGUtlA==
:::
## Window View [Experimental]
:::info
@ -38,8 +38,7 @@ If you anticipate frequent deletes, consider using a [custom partitioning key](/
### Lightweight `DELETE`s with projections
By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation. However, there is a [MergeTree setting](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings), `lightweight_mutation_projection_mode`, that can change this behavior.
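A minimal sketch, assuming a `MergeTree` table `events` that has a projection and that the setting can be supplied per query (names are illustrative):
``` sql
DELETE FROM events WHERE event_date < '2024-01-01'
SETTINGS lightweight_mutation_projection_mode = 'drop';
```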
## Performance considerations when using lightweight `DELETE`
@ -200,6 +200,7 @@ Hierarchy of privileges:
- `JDBC`
- `HDFS`
- `S3`
- `POSTGRES`
- [dictGet](#dictget)
- [displaySecretsInShowAndSelect](#displaysecretsinshowandselect)
- [NAMED COLLECTION ADMIN](#named-collection-admin)
@ -476,6 +477,7 @@ Allows using external data sources. Applies to [table engines](../../engines/tab
- `JDBC`. Level: `GLOBAL`
- `HDFS`. Level: `GLOBAL`
- `S3`. Level: `GLOBAL`
- `POSTGRES`. Level: `GLOBAL`
The `SOURCES` privilege enables use of all the sources. You can also grant a privilege for each source individually. To use sources, you need additional privileges.
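For example (the user name is illustrative), the new source can be granted on its own:
``` sql
GRANT POSTGRES ON *.* TO ingest_user
```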
@ -400,7 +400,7 @@ SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_
After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits till the replica processes all of the fetched commands. The following modifiers are supported:
- If a `STRICT` modifier was specified then the query waits for the replication queue to become empty. The `STRICT` version may never succeed if new entries constantly appear in the replication queue.
- If a `LIGHTWEIGHT` modifier was specified then the query waits only for `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries to be processed.
  Additionally, the `LIGHTWEIGHT` modifier supports an optional `FROM 'srcReplicas'` clause, where `'srcReplicas'` is a comma-separated list of source replica names. This extension allows for more targeted synchronization by focusing only on replication tasks originating from the specified source replicas.
- If a `PULL` modifier was specified then the query pulls new replication queue entries from ZooKeeper, but does not wait for anything to be processed.
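Putting the modifiers together, a hedged example (table and replica names are illustrative):
``` sql
SYSTEM SYNC REPLICA db.replicated_table LIGHTWEIGHT FROM 'replica1,replica2'
```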
@ -526,6 +526,10 @@ Trigger an immediate out-of-schedule refresh of a given view.
SYSTEM REFRESH VIEW [db.]name
```
### WAIT VIEW
Wait for the currently running refresh to complete. If the refresh fails, throws an exception. If no refresh is running, completes immediately, throwing an exception if previous refresh failed.
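Assuming the statement follows the same shape as the other commands in this section:
``` sql
SYSTEM WAIT VIEW [db.]name
```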
### STOP VIEW, STOP VIEWS
Disable periodic refreshing of the given view or all refreshable views. If a refresh is in progress, cancel it too.
@ -103,7 +103,7 @@ LIMIT 2;
└─────────┴─────────┴─────────┘
```
### Inserting data from a file into a table
``` sql
INSERT INTO FUNCTION
@ -116,7 +116,7 @@ SELECT * from HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parque
## Storage Settings {#storage-settings}
- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
- [ignore_access_denied_multidirectory_globs](/docs/en/operations/settings/settings.md#ignore_access_denied_multidirectory_globs) - allows to ignore permission denied errors for multi-directory globs.
@ -290,7 +290,7 @@ SELECT * from s3('s3://data/path/date=*/country=*/code=*/*.parquet') where _date
## Storage Settings {#storage-settings}
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
**See Also**
@ -146,7 +146,30 @@ SELECT dictGet('dict', 'B', 2);
## Example of using named collections with the PostgreSQL database
See [postgresql](../sql-reference/table-functions/postgresql.md) for a description of the parameters. There are also additional aliases:
- `username` for `user`
- `db` for `database`.
The `addresses_expr` parameter is used in a collection instead of `host:port`. The parameter is optional, because there are other parameters as well: `host`, `hostname`, `port`. The following pseudocode shows the priority:
```sql
CASE
WHEN collection['addresses_expr'] != '' THEN collection['addresses_expr']
WHEN collection['host'] != '' THEN collection['host'] || ':' || if(collection['port'] != '', collection['port'], '5432')
WHEN collection['hostname'] != '' THEN collection['hostname'] || ':' || if(collection['port'] != '', collection['port'], '5432')
END
```
Example of creation:
```sql
CREATE NAMED COLLECTION mypg AS
user = 'pguser',
password = 'jw8s0F4',
host = '127.0.0.1',
port = 5432,
database = 'test',
schema = 'test_schema'
```
Example of configuration:
```xml
@ -199,6 +222,10 @@ SELECT * FROM mypgtable;
└───┘
```
:::note
PostgreSQL copies data from the named collection when the table is created. Changes in the collection do not affect existing tables.
:::
### Example of using named collections with a database with the PostgreSQL engine
```sql
@ -6,6 +6,7 @@ macro(configure_bash_completion)
COMMAND ${PKG_CONFIG_BIN} --variable=completionsdir bash-completion
OUTPUT_VARIABLE ${out}
OUTPUT_STRIP_TRAILING_WHITESPACE
COMMAND_ERROR_IS_FATAL ANY
)
endif()
string(REPLACE /usr "${CMAKE_INSTALL_PREFIX}" out "${out}")
@ -75,6 +75,8 @@ public:
const String & default_database_,
const String & user_,
const String & password_,
const String & proto_send_chunked_,
const String & proto_recv_chunked_,
const String & quota_key_,
const String & stage,
bool randomize_,
@ -128,7 +130,9 @@ public:
connections.emplace_back(std::make_unique<ConnectionPool>(
concurrency,
cur_host, cur_port,
default_database_, user_, password_,
proto_send_chunked_, proto_recv_chunked_,
quota_key_,
/* cluster_= */ "",
/* cluster_secret_= */ "",
/* client_name_= */ std::string(DEFAULT_CLIENT_NAME),
@ -662,6 +666,50 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
Strings hosts = options.count("host") ? options["host"].as<Strings>() : Strings({"localhost"});
String proto_send_chunked {"notchunked"};
String proto_recv_chunked {"notchunked"};
if (options.count("proto_caps"))
{
std::string proto_caps_str = options["proto_caps"].as<std::string>();
std::vector<std::string_view> proto_caps;
splitInto<','>(proto_caps, proto_caps_str);
for (auto cap_str : proto_caps)
{
std::string direction;
if (cap_str.starts_with("send_"))
{
direction = "send";
cap_str = cap_str.substr(std::string_view("send_").size());
}
else if (cap_str.starts_with("recv_"))
{
direction = "recv";
cap_str = cap_str.substr(std::string_view("recv_").size());
}
if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional")
throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str);
if (direction.empty())
{
proto_send_chunked = cap_str;
proto_recv_chunked = cap_str;
}
else
{
if (direction == "send")
proto_send_chunked = cap_str;
else
proto_recv_chunked = cap_str;
}
}
}
Benchmark benchmark(
options["concurrency"].as<unsigned>(),
options["delay"].as<double>(),
@ -673,6 +721,8 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
options["database"].as<std::string>(),
options["user"].as<std::string>(),
options["password"].as<std::string>(),
proto_send_chunked,
proto_recv_chunked,
options["quota_key"].as<std::string>(),
options["stage"].as<std::string>(),
options.count("randomize"),
@ -38,6 +38,24 @@
<production>{display_name} \e[1;31m:)\e[0m </production> <!-- if it matched to the substring "production" in the server display name -->
</prompt_by_server_display_name>
<!-- Chunked capabilities for native protocol by client.
Can be enabled separately for send and receive channels.
Supported modes:
- chunked - the client will only work with a server that supports the chunked protocol;
- chunked_optional - the client prefers the server to enable the chunked protocol, but can fall back to notchunked if the server does not support it;
- notchunked - the client will only work with a server that supports the notchunked protocol (current default);
- notchunked_optional - the client prefers the notchunked protocol, but can switch to chunked if the server does not support it.
-->
<!--
<proto_caps>
<send>chunked_optional</send>
<recv>chunked_optional</recv>
</proto_caps>
-->
<!-- Do not send clickhouse-client to background on C-z -->
<!-- <ignore_shell_suspend>true</ignore_shell_suspend> -->
<!--
Settings adjustable via command-line parameters
can take their defaults from that config file, see examples:
@ -247,6 +247,7 @@ void DisksApp::runInteractiveReplxx()
suggest,
history_file,
/* multiline= */ false,
/* ignore_shell_suspend= */ false,
query_extenders,
query_delimiters,
word_break_characters.c_str(),
@ -2,7 +2,7 @@
#include <unordered_map>
#include <vector>
#include <Client/LineReader.h>
#include <Loggers/Loggers.h>
#include "DisksClient.h"
#include "ICommand_fwd.h"
@ -1,6 +1,5 @@
#include "DisksClient.h"
#include <Client/ClientBase.h>
#include <Disks/registerDisks.h>
#include <Common/Config/ConfigProcessor.h>
@ -5,9 +5,8 @@
#include <string>
#include <unordered_map>
#include <vector>
#include <Loggers/Loggers.h>
#include <Disks/IDisk.h>
#include <Interpreters/Context.h>
#include <boost/program_options/options_description.hpp>
@ -175,6 +175,11 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
hash_func.update(options["seed"].as<std::string>());
}
SharedContextHolder shared_context = Context::createShared();
auto context = Context::createGlobal(shared_context.get());
auto context_const = WithContext(context).getContext();
context->makeGlobalContext();
registerInterpreters();
registerFunctions();
registerAggregateFunctions();
@ -259,7 +264,11 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
if (!backslash)
{
WriteBufferFromOwnString str_buf;
bool oneline_current_query = oneline || approx_query_length < max_line_length;
IAST::FormatSettings settings(str_buf, oneline_current_query, hilite);
settings.show_secrets = true;
settings.print_pretty_type_names = !oneline_current_query;
res->format(settings);
if (insert_query_payload)
{
@ -302,7 +311,11 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
else
{
WriteBufferFromOwnString str_buf;
bool oneline_current_query = oneline || approx_query_length < max_line_length;
IAST::FormatSettings settings(str_buf, oneline_current_query, hilite);
settings.show_secrets = true;
settings.print_pretty_type_names = !oneline_current_query;
res->format(settings);
auto res_string = str_buf.str();
WriteBufferFromOStream res_cout(std::cout, 4096);
@ -314,6 +314,7 @@ void KeeperClient::runInteractiveReplxx()
suggest,
history_file,
/* multiline= */ false,
/* ignore_shell_suspend= */ false,
query_extenders,
query_delimiters,
word_break_characters,
@ -66,6 +66,8 @@
/// A minimal file used when the keeper is run without installation
INCBIN(keeper_resource_embedded_xml, SOURCE_DIR "/programs/keeper/keeper_embedded.xml");
extern const char * GIT_HASH;
int mainEntryClickHouseKeeper(int argc, char ** argv)
{
DB::Keeper app;
@ -675,7 +677,7 @@ void Keeper::logRevision() const
"Starting ClickHouse Keeper {} (revision: {}, git hash: {}, build id: {}), PID {}",
VERSION_STRING,
ClickHouseRevision::getVersionRevision(),
GIT_HASH,
build_id.empty() ? "<unknown>" : build_id,
getpid());
}
@ -367,7 +367,7 @@ std::string LocalServer::getInitialCreateTableQuery()
else
table_structure = "(" + table_structure + ")";
return fmt::format("CREATE TEMPORARY TABLE {} {} ENGINE = File({}, {});",
table_name, table_structure, data_format, table_file);
}
@ -713,7 +713,7 @@ void LocalServer::processConfig()
if (index_uncompressed_cache_size > max_cache_size)
{
index_uncompressed_cache_size = max_cache_size;
LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_uncompressed_cache_size));
}
global_context->setIndexUncompressedCache(index_uncompressed_cache_policy, index_uncompressed_cache_size, index_uncompressed_cache_size_ratio);
@ -723,7 +723,7 @@ void LocalServer::processConfig()
if (index_mark_cache_size > max_cache_size)
{
index_mark_cache_size = max_cache_size;
LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_mark_cache_size));
}
global_context->setIndexMarkCache(index_mark_cache_policy, index_mark_cache_size, index_mark_cache_size_ratio);
@ -731,7 +731,7 @@ void LocalServer::processConfig()
if (mmap_cache_size > max_cache_size)
{
mmap_cache_size = max_cache_size;
LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mmap_cache_size));
}
global_context->setMMappedFileCache(mmap_cache_size);
@ -1420,7 +1420,7 @@ try
if (index_uncompressed_cache_size > max_cache_size)
{
index_uncompressed_cache_size = max_cache_size;
LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_uncompressed_cache_size));
}
global_context->setIndexUncompressedCache(index_uncompressed_cache_policy, index_uncompressed_cache_size, index_uncompressed_cache_size_ratio);
@ -1430,7 +1430,7 @@ try
if (index_mark_cache_size > max_cache_size)
{
index_mark_cache_size = max_cache_size;
LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_mark_cache_size));
}
global_context->setIndexMarkCache(index_mark_cache_policy, index_mark_cache_size, index_mark_cache_size_ratio);
@ -1438,7 +1438,7 @@ try
if (mmap_cache_size > max_cache_size)
{
mmap_cache_size = max_cache_size;
LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mmap_cache_size));
}
global_context->setMMappedFileCache(mmap_cache_size);
@ -1449,7 +1449,7 @@ try
if (query_cache_max_size_in_bytes > max_cache_size)
{
query_cache_max_size_in_bytes = max_cache_size;
LOG_INFO(log, "Lowered query cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(query_cache_max_size_in_bytes));
}
global_context->setQueryCache(query_cache_max_size_in_bytes, query_cache_max_entries, query_cache_query_cache_max_entry_size_in_bytes, query_cache_max_entry_size_in_rows);
@ -1769,6 +1769,8 @@ try
new_server_settings.http_connections_store_limit,
});
DNSResolver::instance().setFilterSettings(new_server_settings.dns_allow_resolve_names_to_ipv4, new_server_settings.dns_allow_resolve_names_to_ipv6);
if (global_context->isServerCompletelyStarted())
CannotAllocateThreadFaultInjector::setFaultProbability(new_server_settings.cannot_allocate_thread_fault_injection_probability);
@ -150,6 +150,21 @@
-->
<tcp_port>9000</tcp_port>
<!-- Chunked capabilities for native protocol by server.
Can be enabled separately for send and receive channels.
Supported modes:
- chunked - the server requires the client to have chunked enabled;
- chunked_optional - the server supports both the chunked and notchunked protocol;
- notchunked - the server requires the notchunked protocol from the client (current default);
- notchunked_optional - the server supports both the chunked and notchunked protocol.
-->
<!--
<proto_caps>
<send>notchunked_optional</send>
<recv>notchunked_optional</recv>
</proto_caps>
-->
<!-- Compatibility with MySQL protocol.
ClickHouse will pretend to be MySQL for applications connecting to this port.
-->
@ -93,7 +93,7 @@ namespace
break;
}
UUID id = parse<UUID>(line.substr(0, line.find('\t')));
line.clear();
String queries;
@ -82,7 +82,8 @@ public:
Result authenticate(const String & user_name, const String & password) const
{
Poco::Net::HTTPRequest request{
Poco::Net::HTTPRequest::HTTP_GET, this->getURI().getPathAndQuery(), Poco::Net::HTTPRequest::HTTP_1_1};
Poco::Net::HTTPBasicCredentials basic_credentials{user_name, password};
basic_credentials.authenticate(request);
@ -114,7 +114,7 @@ private:
{
if (ind < first.size())
return first[ind];
return second[ind - first.size()];
}
size_t size() const
@ -242,7 +242,8 @@ ASTPtr FunctionNode::toASTImpl(const ConvertToASTOptions & options) const
/// Avoid cast for `IN tuple(...)` expression.
/// Tuples could be quite big, and adding a type may significantly increase query size.
/// It should be safe because set type for `column IN tuple` is deduced from `column` type.
if (isNameOfInFunction(function_name) && argument_nodes.size() > 1 && argument_nodes[1]->getNodeType() == QueryTreeNodeType::CONSTANT
&& !static_cast<const ConstantNode *>(argument_nodes[1].get())->hasSourceExpression())
new_options.add_cast_for_constants = false;
const auto & parameters = getParameters();
@ -1,5 +1,5 @@
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeObjectDeprecated.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/NestedUtils.h>
@ -452,10 +452,10 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromCompoundExpression(
if (auto * column = compound_expression->as<ColumnNode>())
{
const DataTypePtr & column_type = column->getColumn().getTypeInStorage();
if (column_type->getTypeId() == TypeIndex::ObjectDeprecated)
{
const auto & object_type = checkAndGetDataType<DataTypeObjectDeprecated>(*column_type);
if (object_type.getSchemaFormat() == "json" && object_type.hasNullableSubcolumns())
{
QueryTreeNodePtr constant_node_null = std::make_shared<ConstantNode>(Field());
return constant_node_null;
@ -1000,7 +1000,6 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromJoin(const Identifi
if (!join_node_in_resolve_process && from_join_node.isUsingJoinExpression())
{
auto & join_using_list = from_join_node.getJoinExpression()->as<ListNode &>();
for (auto & join_using_node : join_using_list.getNodes())
{
auto & column_node = join_using_node->as<ColumnNode &>();
@ -3,7 +3,7 @@
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeObjectDeprecated.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeMap.h>
@ -490,8 +490,6 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context
/// process_list_element_holder is used to make an element in ProcessList live while BACKUP is working asynchronously.
auto process_list_element = context_in_use->getProcessListElement();
thread_pool.scheduleOrThrowOnError(
[this,
@ -855,8 +853,6 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt
/// process_list_element_holder is used to make an element in ProcessList live while RESTORE is working asynchronously.
auto process_list_element = context_in_use->getProcessListElement();
thread_pool.scheduleOrThrowOnError(
[this,
@ -158,6 +158,8 @@ void ClientApplicationBase::init(int argc, char ** argv)
("config-file,C", po::value<std::string>(), "config-file path") ("config-file,C", po::value<std::string>(), "config-file path")
("proto_caps", po::value<std::string>(), "enable/disable chunked protocol: chunked_optional, notchunked, notchunked_optional, send_chunked, send_chunked_optional, send_notchunked, send_notchunked_optional, recv_chunked, recv_chunked_optional, recv_notchunked, recv_notchunked_optional")
("query,q", po::value<std::vector<std::string>>()->multitoken(), R"(Query. Can be specified multiple times (--query "SELECT 1" --query "SELECT 2") or once with multiple comma-separated queries (--query "SELECT 1; SELECT 2;"). In the latter case, INSERT queries with non-VALUE format must be separated by empty lines.)") ("query,q", po::value<std::vector<std::string>>()->multitoken(), R"(Query. Can be specified multiple times (--query "SELECT 1" --query "SELECT 2") or once with multiple comma-separated queries (--query "SELECT 1; SELECT 2;"). In the latter case, INSERT queries with non-VALUE format must be separated by empty lines.)")
("queries-file", po::value<std::vector<std::string>>()->multitoken(), "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)") ("queries-file", po::value<std::vector<std::string>>()->multitoken(), "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)")
("multiquery,n", "Obsolete, does nothing") ("multiquery,n", "Obsolete, does nothing")
@ -337,6 +339,41 @@ void ClientApplicationBase::init(int argc, char ** argv)
if (options.count("server_logs_file")) if (options.count("server_logs_file"))
server_logs_file = options["server_logs_file"].as<std::string>(); server_logs_file = options["server_logs_file"].as<std::string>();
if (options.count("proto_caps"))
{
std::string proto_caps_str = options["proto_caps"].as<std::string>();
std::vector<std::string_view> proto_caps;
splitInto<','>(proto_caps, proto_caps_str);
for (auto cap_str : proto_caps)
{
std::string direction;
if (cap_str.starts_with("send_"))
{
direction = "send";
cap_str = cap_str.substr(std::string_view("send_").size());
}
else if (cap_str.starts_with("recv_"))
{
direction = "recv";
cap_str = cap_str.substr(std::string_view("recv_").size());
}
if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional")
throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str);
if (direction.empty())
{
config().setString("proto_caps.send", std::string(cap_str));
config().setString("proto_caps.recv", std::string(cap_str));
}
else
config().setString("proto_caps." + direction, std::string(cap_str));
}
}
query_processing_stage = QueryProcessingStage::fromString(options["stage"].as<std::string>());
query_kind = parseQueryKind(options["query_kind"].as<std::string>());
profile_events.print = options.count("print-profile-events");
@ -73,9 +73,11 @@
#include <limits>
#include <map>
#include <memory>
#include <string_view>
#include <unordered_map>
#include <Common/config_version.h>
#include <base/find_symbols.h>
#include "config.h"
#include <IO/ReadHelpers.h>
#include <Processors/Formats/Impl/ValuesBlockInputFormat.h>
@ -329,7 +331,11 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Setting
{
output_stream << std::endl;
WriteBufferFromOStream res_buf(output_stream, 4096);
IAST::FormatSettings format_settings(res_buf, /* one_line */ false);
format_settings.hilite = true;
format_settings.show_secrets = true;
format_settings.print_pretty_type_names = true;
res->format(format_settings);
res_buf.finalize();
output_stream << std::endl << std::endl;
}
@ -914,6 +920,8 @@ void ClientBase::processTextAsSingleQuery(const String & full_query)
}
catch (Exception & e)
{
if (server_exception)
server_exception->rethrow();
if (!is_interactive)
e.addMessage("(in query: {})", full_query);
throw;
@ -1032,19 +1040,28 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
query_interrupt_handler.start(signals_before_stop);
SCOPE_EXIT({ query_interrupt_handler.stop(); });
try {
connection->sendQuery(
connection_parameters.timeouts,
query,
query_parameters,
client_context->getCurrentQueryId(),
query_processing_stage,
&client_context->getSettingsRef(),
&client_context->getClientInfo(),
true,
[&](const Progress & progress) { onProgress(progress); });
if (send_external_tables)
sendExternalTables(parsed_query);
}
catch (const NetException &)
{
// We still want to attempt to process whatever we already received or can receive (socket receive buffer can be not empty)
receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel);
throw;
}
receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel);
break;
@ -2537,6 +2554,7 @@ void ClientBase::runInteractive()
*suggest,
history_file,
getClientConfiguration().has("multiline"),
getClientConfiguration().getBool("ignore_shell_suspend", true),
query_extenders,
query_delimiters,
word_break_characters,
@ -5,8 +5,6 @@
#include <Core/Settings.h>
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedWriteBuffer.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <IO/copyData.h>
@ -85,6 +83,7 @@ Connection::~Connection()
Connection::Connection(const String & host_, UInt16 port_,
const String & default_database_,
const String & user_, const String & password_,
const String & proto_send_chunked_, const String & proto_recv_chunked_,
[[maybe_unused]] const SSHKey & ssh_private_key_,
const String & jwt_,
const String & quota_key_,
@ -95,6 +94,7 @@ Connection::Connection(const String & host_, UInt16 port_,
Protocol::Secure secure_)
: host(host_), port(port_), default_database(default_database_)
, user(user_), password(password_)
, proto_send_chunked(proto_send_chunked_), proto_recv_chunked(proto_recv_chunked_)
#if USE_SSH
, ssh_private_key(ssh_private_key_)
#endif
@ -211,10 +211,10 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
, tcp_keep_alive_timeout_in_sec);
}
in = std::make_shared<ReadBufferFromPocoSocketChunked>(*socket);
in->setAsyncCallback(async_callback);
out = std::make_shared<WriteBufferFromPocoSocketChunked>(*socket);
out->setAsyncCallback(async_callback);
connected = true;
setDescription();
@ -222,9 +222,61 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
sendHello();
receiveHello(timeouts.handshake_timeout);
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS)
{
/// Client side of chunked protocol negotiation.
/// Server advertises its protocol capabilities (separate for send and receive channels) by sending
/// in its 'Hello' response one of four types - chunked, notchunked, chunked_optional, notchunked_optional.
/// Not optional types are strict meaning that server only supports this type, optional means that
/// server prefer this type but capable to work in opposite.
/// Client selects which type it is going to communicate based on the settings from config or arguments,
/// and sends either "chunked" or "notchunked" protocol request in addendum section of handshake.
/// Client can detect if server's protocol capabilities are not compatible with client's settings (for example
/// server strictly requires chunked protocol but client's settings only allows notchunked protocol) - in such case
/// client should interrupt this connection. However if client continues with incompatible protocol type request, server
/// will send appropriate exception and disconnect client.
auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction)
{
bool chunked_srv = chunked_srv_str.starts_with("chunked");
bool optional_srv = chunked_srv_str.ends_with("_optional");
bool chunked_cl = chunked_cl_str.starts_with("chunked");
bool optional_cl = chunked_cl_str.ends_with("_optional");
if (optional_srv)
return chunked_cl;
if (optional_cl)
return chunked_srv;
if (chunked_cl != chunked_srv)
throw NetException(
ErrorCodes::NETWORK_ERROR,
"Incompatible protocol: {} set to {}, server requires {}",
direction,
chunked_cl ? "chunked" : "notchunked",
chunked_srv ? "chunked" : "notchunked");
return chunked_srv;
};
proto_send_chunked = is_chunked(proto_recv_chunked_srv, proto_send_chunked, "send") ? "chunked" : "notchunked";
proto_recv_chunked = is_chunked(proto_send_chunked_srv, proto_recv_chunked, "recv") ? "chunked" : "notchunked";
}
else
{
if (proto_send_chunked == "chunked" || proto_recv_chunked == "chunked")
throw NetException(
ErrorCodes::NETWORK_ERROR,
"Incompatible protocol: server's version is too old and doesn't support chunked protocol while client settings require it.");
}
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM)
sendAddendum(); sendAddendum();
if (proto_send_chunked == "chunked")
out->enableChunked();
if (proto_recv_chunked == "chunked")
in->enableChunked();
LOG_TRACE(log_wrapper.get(), "Connected to {} server version {}.{}.{}.", LOG_TRACE(log_wrapper.get(), "Connected to {} server version {}.{}.{}.",
server_name, server_version_major, server_version_minor, server_version_patch); server_name, server_version_major, server_version_minor, server_version_patch);
} }
@ -393,6 +445,13 @@ void Connection::sendAddendum()
{ {
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY) if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY)
writeStringBinary(quota_key, *out); writeStringBinary(quota_key, *out);
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS)
{
writeStringBinary(proto_send_chunked, *out);
writeStringBinary(proto_recv_chunked, *out);
}
out->next(); out->next();
} }
@ -472,6 +531,12 @@ void Connection::receiveHello(const Poco::Timespan & handshake_timeout)
else else
server_version_patch = server_revision; server_version_patch = server_revision;
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS)
{
readStringBinary(proto_send_chunked_srv, *in);
readStringBinary(proto_recv_chunked_srv, *in);
}
    if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES)
    {
        UInt64 rules_size;
@ -611,6 +676,7 @@ bool Connection::ping(const ConnectionTimeouts & timeouts)
    UInt64 pong = 0;
    writeVarUInt(Protocol::Client::Ping, *out);
out->finishChunk();
    out->next();
    if (in->eof())
@ -660,6 +726,7 @@ TablesStatusResponse Connection::getTablesStatus(const ConnectionTimeouts & time
    writeVarUInt(Protocol::Client::TablesStatusRequest, *out);
    request.write(*out, server_revision);
out->finishChunk();
    out->next();
    UInt64 response_type = 0;
@ -813,6 +880,8 @@ void Connection::sendQuery(
    block_profile_events_in.reset();
    block_out.reset();
out->finishChunk();
    /// Send empty block which means end of data.
    if (!with_pending_data)
    {
@ -829,6 +898,7 @@ void Connection::sendCancel()
        return;
    writeVarUInt(Protocol::Client::Cancel, *out);
out->finishChunk();
    out->next();
}
@ -854,7 +924,10 @@ void Connection::sendData(const Block & block, const String & name, bool scalar)
    size_t prev_bytes = out->count();
    block_out->write(block);
    if (maybe_compressed_out != out)
maybe_compressed_out->next();
if (!block)
out->finishChunk();
    out->next();
    if (throttler)
@ -865,6 +938,7 @@ void Connection::sendIgnoredPartUUIDs(const std::vector<UUID> & uuids)
{
    writeVarUInt(Protocol::Client::IgnoredPartUUIDs, *out);
    writeVectorBinary(uuids, *out);
out->finishChunk();
    out->next();
}
@ -874,6 +948,7 @@ void Connection::sendReadTaskResponse(const String & response)
    writeVarUInt(Protocol::Client::ReadTaskResponse, *out);
    writeVarUInt(DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION, *out);
    writeStringBinary(response, *out);
out->finishChunk();
    out->next();
}
@ -882,6 +957,7 @@ void Connection::sendMergeTreeReadTaskResponse(const ParallelReadResponse & resp
{
    writeVarUInt(Protocol::Client::MergeTreeReadTaskResponse, *out);
    response.serialize(*out);
out->finishChunk();
    out->next();
}
@ -899,6 +975,8 @@ void Connection::sendPreparedData(ReadBuffer & input, size_t size, const String
        copyData(input, *out);
    else
        copyData(input, *out, size);
out->finishChunk();
    out->next();
}
@ -927,6 +1005,8 @@ void Connection::sendScalarsData(Scalars & data)
        sendData(elem.second, elem.first, true /* scalar */);
    }
out->finishChunk();
    out_bytes = out->count() - out_bytes;
    maybe_compressed_out_bytes = maybe_compressed_out->count() - maybe_compressed_out_bytes;
    double elapsed = watch.elapsedSeconds();
@ -1069,13 +1149,13 @@ std::optional<Poco::Net::SocketAddress> Connection::getResolvedAddress() const
bool Connection::poll(size_t timeout_microseconds)
{
    return in->poll(timeout_microseconds);
}

bool Connection::hasReadPendingData() const
{
    return last_input_packet_type.has_value() || in->hasBufferedData();
}
@ -1349,6 +1429,8 @@ ServerConnectionPtr Connection::createConnection(const ConnectionParameters & pa
        parameters.default_database,
        parameters.user,
        parameters.password,
parameters.proto_send_chunked,
parameters.proto_recv_chunked,
        parameters.ssh_private_key,
        parameters.jwt,
        parameters.quota_key,

View File

@ -8,8 +8,8 @@
#include <Core/Defines.h>
#include <IO/ReadBufferFromPocoSocketChunked.h>
#include <IO/WriteBufferFromPocoSocketChunked.h>
#include <Interpreters/TablesStatus.h>
#include <Interpreters/Context_fwd.h>
@ -52,6 +52,7 @@ public:
    Connection(const String & host_, UInt16 port_,
        const String & default_database_,
        const String & user_, const String & password_,
const String & proto_send_chunked_, const String & proto_recv_chunked_,
        const SSHKey & ssh_private_key_,
        const String & jwt_,
        const String & quota_key_,
@ -170,6 +171,10 @@ private:
    String default_database;
    String user;
    String password;
String proto_send_chunked;
String proto_recv_chunked;
String proto_send_chunked_srv;
String proto_recv_chunked_srv;
#if USE_SSH
    SSHKey ssh_private_key;
#endif
@ -209,8 +214,8 @@ private:
    String server_display_name;
    std::unique_ptr<Poco::Net::StreamSocket> socket;
    std::shared_ptr<ReadBufferFromPocoSocketChunked> in;
    std::shared_ptr<WriteBufferFromPocoSocketChunked> out;
    std::optional<UInt64> last_input_packet_type;
    String query_id;

View File

@ -107,6 +107,9 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
        }
    }
proto_send_chunked = config.getString("proto_caps.send", "notchunked");
proto_recv_chunked = config.getString("proto_caps.recv", "notchunked");
    quota_key = config.getString("quota_key", "");
    /// By default compression is disabled if address looks like localhost.
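
The proto_caps.send / proto_caps.recv keys above are read through Poco's AbstractConfiguration interface, so a missing key silently falls back to "notchunked". A small sketch of that fallback behaviour (standalone illustration only, not part of this change; MapConfiguration is just a stand-in for the real client config):

#include <iostream>
#include <Poco/AutoPtr.h>
#include <Poco/Util/MapConfiguration.h>

int main()
{
    Poco::AutoPtr<Poco::Util::MapConfiguration> config = new Poco::Util::MapConfiguration;
    config->setString("proto_caps.send", "chunked_optional"); /// pretend only the send direction was configured

    std::cout << config->getString("proto_caps.send", "notchunked") << '\n'; /// chunked_optional
    std::cout << config->getString("proto_caps.recv", "notchunked") << '\n'; /// notchunked (default applied)
}
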

View File

@ -20,6 +20,8 @@ struct ConnectionParameters
    std::string default_database;
    std::string user;
    std::string password;
std::string proto_send_chunked = "notchunked";
std::string proto_recv_chunked = "notchunked";
    std::string quota_key;
    SSHKey ssh_private_key;
    std::string jwt;

View File

@ -13,6 +13,8 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
    String default_database,
    String user,
    String password,
String proto_send_chunked,
String proto_recv_chunked,
    String quota_key,
    String cluster,
    String cluster_secret,
@ -22,7 +24,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
    Priority priority)
{
    Key key{
        max_connections, host, port, default_database, user, password, proto_send_chunked, proto_recv_chunked, quota_key, cluster, cluster_secret, client_name, compression, secure, priority};
    std::lock_guard lock(mutex);
    auto [it, inserted] = pools.emplace(key, ConnectionPoolPtr{});
@ -39,6 +41,8 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
            default_database,
            user,
            password,
proto_send_chunked,
proto_recv_chunked,
            quota_key,
            cluster,
            cluster_secret,

View File

@ -73,6 +73,8 @@ public:
        const String & default_database_,
        const String & user_,
        const String & password_,
const String & proto_send_chunked_,
const String & proto_recv_chunked_,
        const String & quota_key_,
        const String & cluster_,
        const String & cluster_secret_,
@ -85,6 +87,8 @@ public:
        , default_database(default_database_)
        , user(user_)
        , password(password_)
, proto_send_chunked(proto_send_chunked_)
, proto_recv_chunked(proto_recv_chunked_)
        , quota_key(quota_key_)
        , cluster(cluster_)
        , cluster_secret(cluster_secret_)
@ -116,7 +120,9 @@ protected:
    {
        return std::make_shared<Connection>(
            host, port,
            default_database, user, password,
proto_send_chunked, proto_recv_chunked,
SSHKey(), /*jwt*/ "", quota_key,
            cluster, cluster_secret,
            client_name, compression, secure);
    }
@ -125,6 +131,8 @@ private:
    String default_database;
    String user;
    String password;
String proto_send_chunked;
String proto_recv_chunked;
    String quota_key;
    /// For inter-server authorization
@ -150,6 +158,8 @@ public:
        String default_database;
        String user;
        String password;
String proto_send_chunked;
String proto_recv_chunked;
        String quota_key;
        String cluster;
        String cluster_secret;
@ -173,6 +183,8 @@ public:
        String default_database,
        String user,
        String password,
String proto_send_chunked,
String proto_recv_chunked,
        String quota_key,
        String cluster,
        String cluster_secret,
@ -190,6 +202,7 @@ inline bool operator==(const ConnectionPoolFactory::Key & lhs, const ConnectionP
{
    return lhs.max_connections == rhs.max_connections && lhs.host == rhs.host && lhs.port == rhs.port
        && lhs.default_database == rhs.default_database && lhs.user == rhs.user && lhs.password == rhs.password
&& lhs.proto_send_chunked == rhs.proto_send_chunked && lhs.proto_recv_chunked == rhs.proto_recv_chunked
        && lhs.quota_key == rhs.quota_key
        && lhs.cluster == rhs.cluster && lhs.cluster_secret == rhs.cluster_secret && lhs.client_name == rhs.client_name
        && lhs.compression == rhs.compression && lhs.secure == rhs.secure && lhs.priority == rhs.priority;

View File

@ -294,6 +294,7 @@ ReplxxLineReader::ReplxxLineReader(
    Suggest & suggest,
    const String & history_file_path_,
    bool multiline_,
bool ignore_shell_suspend,
    Patterns extenders_,
    Patterns delimiters_,
    const char word_break_characters_[],
@ -363,7 +364,8 @@ ReplxxLineReader::ReplxxLineReader(
    rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); });
    /// We don't want the default, "suspend" behavior, it confuses people.
    if (ignore_shell_suspend)
rx.bind_key_internal(replxx::Replxx::KEY::control('Z'), "insert_character");
    auto commit_action = [this](char32_t code)
    {

View File

@ -15,6 +15,7 @@ public:
    Suggest & suggest,
    const String & history_file_path,
    bool multiline,
bool ignore_shell_suspend,
    Patterns extenders_,
    Patterns delimiters_,
    const char word_break_characters_[],

View File

@ -452,6 +452,11 @@ void ColumnArray::reserve(size_t n)
    getData().reserve(n); /// The average size of arrays is not taken into account here. Or it is considered to be no more than 1.
}
size_t ColumnArray::capacity() const
{
return getOffsets().capacity();
}
void ColumnArray::prepareForSquashing(const Columns & source_columns)
{
    size_t new_size = size();

Some files were not shown because too many files have changed in this diff