Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-26 17:41:59 +00:00)
Merge branch 'master' into vdimir/multiple_joins_with_storage_join
Commit dfe8508a3a
.github/actions/clean/action.yml (vendored, 18 lines changed)

@@ -1,11 +1,23 @@
 name: Clean runner
 description: Clean the runner's temp path on ending
+inputs:
+  images:
+    description: clean docker images
+    default: false
+    type: boolean
 runs:
   using: "composite"
   steps:
-    - name: Clean
+    - name: Clean Temp
      shell: bash
      run: |
-        docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-        docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
        sudo rm -fr "${{runner.temp}}"
+    - name: Clean Docker Containers
+      shell: bash
+      run: |
+        docker rm -vf $(docker ps -aq) ||:
+    - name: Clean Docker Images
+      if: ${{ inputs.images }}
+      shell: bash
+      run: |
+        docker rmi -f $(docker images -aq) ||:
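The new `images` input makes image pruning opt-in, while container and temp-path cleanup stay unconditional. A minimal usage sketch of the updated composite action from a caller job (the job name and runner label are illustrative; the checkout step is only there because a local action must exist in the workspace before it can be referenced):

```yaml
jobs:
  CleanUpExample:
    runs-on: [self-hosted, style-checker]
    steps:
      # A local composite action can only be used after the repository is checked out
      - uses: ClickHouse/checkout@v1
      # Removes containers and ${{ runner.temp }}; also prunes images because images: true
      - uses: ./.github/actions/clean
        with:
          images: true
```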
.github/workflows/auto_releases.yml (vendored, 82 lines changed)

@@ -14,18 +14,16 @@ on:
       dry-run:
         description: 'Dry run'
         required: false
-        default: true
+        default: false
         type: boolean
 
 jobs:
   AutoReleaseInfo:
-    runs-on: [self-hosted, style-checker-aarch64]
+    runs-on: [self-hosted, release-maker]
     outputs:
       data: ${{ steps.info.outputs.AUTO_RELEASE_PARAMS }}
       dry_run: ${{ steps.info.outputs.DRY_RUN }}
     steps:
-      - name: Debug Info
-        uses: ./.github/actions/debug
       - name: Set envs
         run: |
           cat >> "$GITHUB_ENV" << 'EOF'
@@ -36,6 +34,10 @@ jobs:
             echo "DRY_RUN=true" >> "$GITHUB_ENV"
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
+        with:
+          fetch-depth: 0 # full history needed
+      - name: Debug Info
+        uses: ./.github/actions/debug
       - name: Prepare Info
         id: info
         run: |
@@ -46,15 +48,14 @@ jobs:
           echo "::endgroup::"
           {
             echo 'AUTO_RELEASE_PARAMS<<EOF'
-            cat /tmp/autorelease_info.json
+            cat /tmp/autorelease_params.json
-            echo 'EOF'
-          } >> "$GITHUB_ENV"
-          {
-            echo 'AUTO_RELEASE_PARAMS<<EOF'
-            cat /tmp/autorelease_info.json
             echo 'EOF'
           } >> "$GITHUB_OUTPUT"
+          if [[ "${{ github.event_name }}" == "schedule" ]]; then
             echo "DRY_RUN=true" >> "$GITHUB_OUTPUT"
+          else
+            echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_OUTPUT"
+          fi
       - name: Post Release Branch statuses
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
@@ -62,48 +63,37 @@ jobs:
       - name: Clean up
         uses: ./.github/actions/clean
 
-  Release_0:
+  Releases:
     needs: AutoReleaseInfo
-    name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].release_branch }}
-    if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].ready }}
+    strategy:
+      matrix:
+        release_params: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases }}
+      max-parallel: 1
+    name: Release ${{ matrix.release_params.release_branch }}
     uses: ./.github/workflows/create_release.yml
     with:
-      ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].commit_sha }}
+      ref: ${{ matrix.release_params.commit_sha }}
       type: patch
-      dry-run: ${{ needs.AutoReleaseInfo.outputs.dry_run }}
+      dry-run: ${{ fromJson(needs.AutoReleaseInfo.outputs.dry_run) }}
-  #
+    secrets:
-  # Release_1:
+      ROBOT_CLICKHOUSE_COMMIT_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
-  #   needs: [AutoReleaseInfo, Release_0]
-  #   name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].release_branch }}
-  #   if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].ready }}
-  #   uses: ./.github/workflows/create_release.yml
-  #   with:
-  #     ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].commit_sha }}
-  #     type: patch
-  #     dry-run: ${{ env.DRY_RUN }}
-  #
-  # Release_2:
-  #   needs: [AutoReleaseInfo, Release_1]
-  #   name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[2].release_branch }}
-  #   if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[2].ready }}
-  #   uses: ./.github/workflow/create_release.yml
-  #   with:
-  #     ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].commit_sha }}
-  #     type: patch
-  #     dry-run: ${{ env.DRY_RUN }}
-  #
-  # Release_3:
-  #   needs: [AutoReleaseInfo, Release_2]
-  #   name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].release_branch }}
-  #   if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].ready }}
-  #   uses: ./.github/workflow/create_release.yml
-  #   with:
-  #     ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].commit_sha }}
-  #     type: patch
-  #     dry-run: ${{ env.DRY_RUN }}
 
-  # - name: Post Slack Message
+  CleanUp:
+    needs: [Releases]
+    runs-on: [self-hosted, release-maker]
+    steps:
+      - uses: ./.github/actions/clean
+        with:
+          images: true
+
+  # PostSlackMessage:
+  #   needs: [Releases]
+  #   runs-on: [self-hosted, release-maker]
   #   if: ${{ !cancelled() }}
+  #   steps:
+  #     - name: Check out repository code
+  #       uses: ClickHouse/checkout@v1
+  #     - name: Post
   #       run: |
   #         cd "$GITHUB_WORKSPACE/tests/ci"
  #         python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
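For context on the pattern above: `Prepare Info` writes a multi-line JSON value into `$GITHUB_OUTPUT` through a heredoc, the job republishes it as the `data` output, and the `Releases` job fans out over it with a `fromJson` matrix, while `max-parallel: 1` keeps the per-branch releases sequential (replacing the old hard-coded `Release_0`/`Release_1`/... chain). A self-contained sketch of that mechanism with an invented two-element payload (job names, the runner, and the JSON shape are illustrative, not the real AUTO_RELEASE_PARAMS schema):

```yaml
name: matrix-from-json-sketch
on: workflow_dispatch
jobs:
  info:
    runs-on: ubuntu-latest
    outputs:
      data: ${{ steps.info.outputs.PARAMS }}
    steps:
      - id: info
        run: |
          # The NAME<<EOF form is required for multi-line values in $GITHUB_OUTPUT
          {
            echo 'PARAMS<<EOF'
            echo '{"releases": [{"release_branch": "24.7", "commit_sha": "aaaaaaa"}, {"release_branch": "24.8", "commit_sha": "bbbbbbb"}]}'
            echo 'EOF'
          } >> "$GITHUB_OUTPUT"
  release:
    needs: info
    strategy:
      matrix:
        release_params: ${{ fromJson(needs.info.outputs.data).releases }}
      max-parallel: 1  # one release at a time, like the old Release_0 -> Release_1 chain
    runs-on: ubuntu-latest
    steps:
      - run: echo "Would release ${{ matrix.release_params.release_branch }} at ${{ matrix.release_params.commit_sha }}"
```

Each element of the `releases` array becomes one `release` job, so adding a branch to the JSON adds a job without touching the workflow file.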
.github/workflows/create_release.yml (vendored, 2 lines changed)

@@ -47,6 +47,8 @@ concurrency:
         required: false
         default: false
         type: boolean
+    secrets:
+      ROBOT_CLICKHOUSE_COMMIT_TOKEN:
 
 jobs:
   CreateRelease:
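Two details of reusable workflows are relevant to this change. A secret must be declared under `workflow_call.secrets` before a caller can forward it, which is why `auto_releases.yml` above gained an explicit `secrets:` block. And job outputs are plain strings, which is why the caller wraps `needs.AutoReleaseInfo.outputs.dry_run` in `fromJson()` to turn `"true"`/`"false"` back into a boolean for this `type: boolean` input. A hedged sketch of the callee interface (the job body is illustrative):

```yaml
on:
  workflow_call:
    inputs:
      dry-run:
        required: false
        default: false
        type: boolean
    secrets:
      ROBOT_CLICKHOUSE_COMMIT_TOKEN:

jobs:
  CreateRelease:
    runs-on: ubuntu-latest
    steps:
      - name: Show what was passed in
        env:
          GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}  # empty unless the caller forwards it
        run: echo "dry-run resolved to ${{ inputs.dry-run }}"
```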
.github/workflows/release_branches.yml (vendored, 4 lines changed)

@@ -130,6 +130,7 @@ jobs:
     with:
       build_name: package_debug
       data: ${{ needs.RunConfig.outputs.data }}
+      force: true
   BuilderBinDarwin:
     needs: [RunConfig, BuildDockers]
     if: ${{ !failure() && !cancelled() }}
@@ -482,7 +483,7 @@ jobs:
       if: ${{ !failure() }}
       run: |
         # update overall ci report
-        python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+        python3 ./tests/ci/finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
     - name: Check Workflow results
       if: ${{ !cancelled() }}
       run: |
@@ -490,5 +491,4 @@ jobs:
         cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
         ${{ toJson(needs) }}
         EOF
-
         python3 ./tests/ci/ci_buddy.py --check-wf-status
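The last two hunks lean on a pattern used across ClickHouse CI: collapse all upstream job results into one status with `contains(needs.*.result, 'failure')`, and dump the whole `needs` context into a JSON file for a checker script (`tests/ci/ci_buddy.py` in the real workflow). A stripped-down sketch under assumed job names, with echo/cat standing in for the Python scripts:

```yaml
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - run: "true"
  FinishCheck:
    needs: [build]
    if: ${{ !cancelled() }}
    runs-on: ubuntu-latest
    env:
      WORKFLOW_RESULT_FILE: /tmp/workflow_results.json
    steps:
      - name: Report overall status
        run: |
          # 'failure' if any upstream job failed, otherwise 'success'
          echo "wf-status=${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}"
      - name: Check Workflow results
        run: |
          # Quoted heredoc: the expression is expanded by Actions before the shell runs
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
          EOF
          cat "$WORKFLOW_RESULT_FILE"
```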
.gitmodules (vendored, 2 lines changed)

@@ -108,7 +108,7 @@
     url = https://github.com/ClickHouse/icudata
 [submodule "contrib/icu"]
     path = contrib/icu
-    url = https://github.com/unicode-org/icu
+    url = https://github.com/ClickHouse/icu
 [submodule "contrib/flatbuffers"]
     path = contrib/flatbuffers
     url = https://github.com/ClickHouse/flatbuffers
CHANGELOG.md (147 lines changed)

@@ -1,4 +1,5 @@
 ### Table of Contents
+**[ClickHouse release v24.8 LTS, 2024-08-20](#248)**<br/>
 **[ClickHouse release v24.7, 2024-07-30](#247)**<br/>
 **[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
 **[ClickHouse release v24.5, 2024-05-30](#245)**<br/>

@@ -10,6 +11,152 @@

# 2024 Changelog

### <a id="248"></a> ClickHouse release 24.8 LTS, 2024-08-20

#### Backward Incompatible Change
* `clickhouse-client` and `clickhouse-local` now default to multi-query mode (instead single-query mode). As an example, `clickhouse-client -q "SELECT 1; SELECT 2"` now works, whereas users previously had to add `--multiquery` (or `-n`). The `--multiquery/-n` switch became obsolete. INSERT queries in multi-query statements are treated specially based on their FORMAT clause: If the FORMAT is `VALUES` (the most common case), the end of the INSERT statement is represented by a trailing semicolon `;` at the end of the query. For all other FORMATs (e.g. `CSV` or `JSONEachRow`), the end of the INSERT statement is represented by two newlines `\n\n` at the end of the query. [#63898](https://github.com/ClickHouse/ClickHouse/pull/63898) ([FFish](https://github.com/wxybear)).
* In previous versions, it was possible to use an alternative syntax for `LowCardinality` data types by appending `WithDictionary` to the name of the data type. It was an initial working implementation, and it was never documented or exposed to the public. Now, it is deprecated. If you have used this syntax, you have to ALTER your tables and rename the data types to `LowCardinality`. [#66842](https://github.com/ClickHouse/ClickHouse/pull/66842) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix logical errors with storage `Buffer` used with distributed destination table. It's a backward incompatible change: queries using `Buffer` with a distributed destination table may stop working if the table appears more than once in the query (e.g., in a self-join). [#67015](https://github.com/ClickHouse/ClickHouse/pull/67015) ([vdimir](https://github.com/vdimir)).
* In previous versions, calling functions for random distributions based on the Gamma function (such as Chi-Squared, Student, Fisher) with negative arguments close to zero led to a long computation or an infinite loop. In the new version, calling these functions with zero or negative arguments will produce an exception. This closes [#67297](https://github.com/ClickHouse/ClickHouse/issues/67297). [#67326](https://github.com/ClickHouse/ClickHouse/pull/67326) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The system table `text_log` is enabled by default. This is fully compatible with previous versions, but you may notice subtly increased disk usage on the local disk (this system table takes a tiny amount of disk space). [#67428](https://github.com/ClickHouse/ClickHouse/pull/67428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, `arrayWithConstant` can be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix REPLACE modifier formatting (forbid omitting brackets). [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement `Dynamic` type. Now when the limit of dynamic data types is reached new types are not casted to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into `Dynamic` column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).
#### New Feature
* Added a new `MergeTree` setting `deduplicate_merge_projection_mode` to control the projections during merges (for specific engines) and `OPTIMIZE DEDUPLICATE` query. Supported options: `throw` (throw an exception in case the projection is not fully supported for *MergeTree engine), `drop` (remove projection during merge if it can't be merged itself consistently) and `rebuild` (rebuild projection from scratch, which is a heavy operation). [#66672](https://github.com/ClickHouse/ClickHouse/pull/66672) ([jsc0218](https://github.com/jsc0218)).
* Add `_etag` virtual column for S3 table engine. Fixes [#65312](https://github.com/ClickHouse/ClickHouse/issues/65312). [#65386](https://github.com/ClickHouse/ClickHouse/pull/65386) ([skyoct](https://github.com/skyoct)).
* Added a tagging (namespace) mechanism for the query cache. The same queries with different tags are considered different by the query cache. Example: `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'abc'` and `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'def'` now create different query cache entries. [#68235](https://github.com/ClickHouse/ClickHouse/pull/68235) ([sakulali](https://github.com/sakulali)).
* Support more variants of JOIN strictness (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequality conditions which involve columns from both left and right table. e.g. `t1.y < t2.y` (see the setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)).
* Interpret Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. The follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add function `printf` for Spark compatibility (but you can use the existing `format` function). [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)).
* Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table_engines to `Null` engine that can be useful for testing. It should work for RESTORE and explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)).
* Added support for reading `MULTILINESTRING` geometry in `WKT` format using function `readWKTLineString`. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)).
* Add a new table function `fuzzQuery`. This function allows the modification of a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1') LIMIT 5;`. [#67655](https://github.com/ClickHouse/ClickHouse/pull/67655) ([pufit](https://github.com/pufit)).
* Add a query `ALTER TABLE ... DROP DETACHED PARTITION ALL` to drop all detached partitions. [#67885](https://github.com/ClickHouse/ClickHouse/pull/67885) ([Duc Canh Le](https://github.com/canhld94)).
* Add the `rows_before_aggregation_at_least` statistic to the query response when a new setting, `rows_before_aggregation` is enabled. This statistic represents the number of rows read before aggregation. In the context of a distributed query, when using the `group by` or `max` aggregation function without a `limit`, `rows_before_aggregation_at_least` can reflect the number of rows hit by the query. [#66084](https://github.com/ClickHouse/ClickHouse/pull/66084) ([morning-color](https://github.com/morning-color)).
* Support `OPTIMIZE` query on `Join` tables to reduce their memory footprint. [#67883](https://github.com/ClickHouse/ClickHouse/pull/67883) ([Duc Canh Le](https://github.com/canhld94)).
* Allow run query instantly in play if you add `&run=1` in the URL [#66457](https://github.com/ClickHouse/ClickHouse/pull/66457) ([Aleksandr Musorin](https://github.com/AVMusorin)).
#### Experimental Feature
* Implement a new `JSON` data type. [#66444](https://github.com/ClickHouse/ClickHouse/pull/66444) ([Kruglov Pavel](https://github.com/Avogar)).
* Add the new `TimeSeries` table engine. [#64183](https://github.com/ClickHouse/ClickHouse/pull/64183) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add new experimental `Kafka` storage engine to store offsets in Keeper instead of relying on committing them to Kafka. It makes the commit to ClickHouse tables atomic with regard to consumption from the queue. [#57625](https://github.com/ClickHouse/ClickHouse/pull/57625) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Use adaptive read task size calculation method (adaptive meaning it depends on read column sizes) for parallel replicas. [#60377](https://github.com/ClickHouse/ClickHouse/pull/60377) ([Nikita Taranov](https://github.com/nickitat)).
* Added statistics type `count_min` (count-min sketches) which provide selectivity estimations for equality predicates like `col = 'val'`. Supported data types are string, date, datatime and numeric types. [#65521](https://github.com/ClickHouse/ClickHouse/pull/65521) ([JackyWoo](https://github.com/JackyWoo)).
#### Performance Improvement
* Setting `optimize_functions_to_subcolumns` is enabled by default. [#68053](https://github.com/ClickHouse/ClickHouse/pull/68053) ([Anton Popov](https://github.com/CurtizJ)).
* Store the `plain_rewritable` disk directory metadata in `__meta` layout, separately from the merge tree data in the object storage. Move the `plain_rewritable` disk to a flat directory structure. [#65751](https://github.com/ClickHouse/ClickHouse/pull/65751) ([Julia Kartseva](https://github.com/jkartseva)).
* Improve columns squashing (an operation happening in INSERT queries) for `String`/`Array`/`Map`/`Variant`/`Dynamic` types by reserving required memory in advance for all subcolumns. [#67043](https://github.com/ClickHouse/ClickHouse/pull/67043) ([Kruglov Pavel](https://github.com/Avogar)).
* Speed up `SYSTEM FLUSH LOGS` and flush logs on shutdown. [#67472](https://github.com/ClickHouse/ClickHouse/pull/67472) ([Sema Checherinda](https://github.com/CheSema)).
* Improved overall performance of merges by reducing the overhead of the scheduling steps of merges. [#68016](https://github.com/ClickHouse/ClickHouse/pull/68016) ([Anton Popov](https://github.com/CurtizJ)).
* Speed up tables removal for `DROP DATABASE` query, increased the default value for `database_catalog_drop_table_concurrency` to 16. [#67228](https://github.com/ClickHouse/ClickHouse/pull/67228) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Avoid allocating too much capacity for array column while writing ORC. Performance speeds up 15% for an Array column. [#67879](https://github.com/ClickHouse/ClickHouse/pull/67879) ([李扬](https://github.com/taiyang-li)).
* Speed up mutations for non-replicated MergeTree significantly [#66911](https://github.com/ClickHouse/ClickHouse/pull/66911) [#66909](https://github.com/ClickHouse/ClickHouse/pull/66909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### Improvement
* Setting `allow_experimental_analyzer` is renamed to `enable_analyzer`. The old name is preserved in a form of an alias. This signifies that Analyzer is no longer in beta and is fully promoted to production. [#66438](https://github.com/ClickHouse/ClickHouse/pull/66438) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Improve schema inference of date times. Now DateTime64 used only when date time has fractional part, otherwise regular DateTime is used. Inference of Date/DateTime is more strict now, especially when `date_time_input_format='best_effort'` to avoid inferring date times from strings in corner cases. [#68382](https://github.com/ClickHouse/ClickHouse/pull/68382) ([Kruglov Pavel](https://github.com/Avogar)).
* ClickHouse server now supports new setting `max_keep_alive_requests`. For keep-alive HTTP connections to the server it works in tandem with `keep_alive_timeout` - if idle timeout not expired but there already more than `max_keep_alive_requests` requests done through the given connection - it will be closed by the server. [#61793](https://github.com/ClickHouse/ClickHouse/pull/61793) ([Nikita Taranov](https://github.com/nickitat)).
* Various improvements in the advanced dashboard. This closes [#67697](https://github.com/ClickHouse/ClickHouse/issues/67697). This closes [#63407](https://github.com/ClickHouse/ClickHouse/issues/63407). This closes [#51129](https://github.com/ClickHouse/ClickHouse/issues/51129). This closes [#61204](https://github.com/ClickHouse/ClickHouse/issues/61204). [#67701](https://github.com/ClickHouse/ClickHouse/pull/67701) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not require a grant for REMOTE when creating a Distributed table: a grant for the Distributed engine is enough. [#65419](https://github.com/ClickHouse/ClickHouse/pull/65419) ([jsc0218](https://github.com/jsc0218)).
* Do not pass logs for keeper explicitly in the Docker image to allow overriding. [#65564](https://github.com/ClickHouse/ClickHouse/pull/65564) ([Azat Khuzhin](https://github.com/azat)).
* Introduced `use_same_password_for_base_backup` settings for `BACKUP` and `RESTORE` queries, allowing to create and restore incremental backups to/from password protected archives. [#66214](https://github.com/ClickHouse/ClickHouse/pull/66214) ([Samuele](https://github.com/sguerrini97)).
* Ignore `async_load_databases` for `ATTACH` query (previously it was possible for ATTACH to return before the tables had been attached). [#66240](https://github.com/ClickHouse/ClickHouse/pull/66240) ([Azat Khuzhin](https://github.com/azat)).
* Added logs and metrics for rejected connections (where there are not enough resources). [#66410](https://github.com/ClickHouse/ClickHouse/pull/66410) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support proper `UUID` type for MongoDB engine. [#66671](https://github.com/ClickHouse/ClickHouse/pull/66671) ([Azat Khuzhin](https://github.com/azat)).
* Add replication lag and recovery time metrics. [#66703](https://github.com/ClickHouse/ClickHouse/pull/66703) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Add `DiskS3NoSuchKeyErrors` metric. [#66704](https://github.com/ClickHouse/ClickHouse/pull/66704) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Ensure the `COMMENT` clause works for all table engines. [#66832](https://github.com/ClickHouse/ClickHouse/pull/66832) ([Joe Lynch](https://github.com/joelynch)).
* Function `mapFromArrays` now accepts `Map(K, V)` as first argument, for example: `SELECT mapFromArrays(map('a', 4, 'b', 4), ['aa', 'bb'])` now works and returns `{('a',4):'aa',('b',4):'bb'}`. Also, if the 1st argument is an Array, it can now also be of type `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as long as the actual array values are not `NULL`. [#67103](https://github.com/ClickHouse/ClickHouse/pull/67103) ([李扬](https://github.com/taiyang-li)).
* Read configuration for `clickhouse-local` from `~/.clickhouse-local`. [#67135](https://github.com/ClickHouse/ClickHouse/pull/67135) ([Azat Khuzhin](https://github.com/azat)).
* Rename setting `input_format_orc_read_use_writer_time_zone` to `input_format_orc_reader_timezone` and allow the user to set the reader timezone. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)).
* Decrease level of the `Socket is not connected` error when HTTP connection immediately reset by peer after connecting, close [#34218](https://github.com/ClickHouse/ClickHouse/issues/34218). [#67177](https://github.com/ClickHouse/ClickHouse/pull/67177) ([vdimir](https://github.com/vdimir)).
* Add ability to load dashboards for `system.dashboards` from config (once set, they overrides the default dashboards preset). [#67232](https://github.com/ClickHouse/ClickHouse/pull/67232) ([Azat Khuzhin](https://github.com/azat)).
* The window functions in SQL are traditionally in snake case. ClickHouse uses `camelCase`, so new aliases `denseRank()` and `percentRank()` have been created. These new functions can be called the exact same as the original `dense_rank()` and `percent_rank()` functions. Both snake case and camelCase syntaxes remain usable. A new test for each of the functions has been added as well. This closes [#67042](https://github.com/ClickHouse/ClickHouse/issues/67042) . [#67334](https://github.com/ClickHouse/ClickHouse/pull/67334) ([Peter Nguyen](https://github.com/petern48)).
* Autodetect configuration file format if is not `.xml`, `.yml` or `.yaml`. If the file begins with < it might be XML, otherwise it might be YAML. It is useful when providing a configuration file from a pipe: `clickhouse-server --config-file <(echo "hello: world")`. [#67391](https://github.com/ClickHouse/ClickHouse/pull/67391) ([sakulali](https://github.com/sakulali)).
* Functions `formatDateTime` and `formatDateTimeInJodaSyntax` now treat their format parameter as optional. If it is not specified, format strings `%Y-%m-%d %H:%i:%s` and `yyyy-MM-dd HH:mm:ss` are assumed. Example: `SELECT parseDateTime('2021-01-04 23:12:34')` now returns DateTime value `2021-01-04 23:12:34` (previously, this threw an exception). [#67399](https://github.com/ClickHouse/ClickHouse/pull/67399) ([Robert Schulze](https://github.com/rschu1ze)).
* Automatically retry Keeper requests in KeeperMap if they happen because of timeout or connection loss. [#67448](https://github.com/ClickHouse/ClickHouse/pull/67448) ([Antonio Andelic](https://github.com/antonio2368)).
* Add `-no-pie` to Aarch64 Linux builds to allow proper introspection and symbolizing of stacktraces after a ClickHouse restart. [#67916](https://github.com/ClickHouse/ClickHouse/pull/67916) ([filimonov](https://github.com/filimonov)).
* Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)).
* Fix settings and `current_database` in `system.processes` for async BACKUP/RESTORE. [#68163](https://github.com/ClickHouse/ClickHouse/pull/68163) ([Azat Khuzhin](https://github.com/azat)).
* Remove unnecessary logs for non-replicated `MergeTree`. [#68238](https://github.com/ClickHouse/ClickHouse/pull/68238) ([Daniil Ivanik](https://github.com/divanik)).
#### Build/Testing/Packaging Improvement
* Integration tests flaky check will not run each test case multiple times to find more issues in tests and make them more reliable. It is using `pytest-repeat` library to run test case multiple times for the same environment. It is important to cleanup tables and other entities in the end of a test case to pass. Repeating works much faster than several pytest runs as it starts necessary containers only once. [#66986](https://github.com/ClickHouse/ClickHouse/pull/66986) ([Ilya Yatsishin](https://github.com/qoega)).
* Unblock the usage of CLion with ClickHouse. In previous versions, CLion freezed for a minute on every keypress. This closes [#66994](https://github.com/ClickHouse/ClickHouse/issues/66994). [#66995](https://github.com/ClickHouse/ClickHouse/pull/66995) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* getauxval: avoid a crash under a sanitizer re-exec due to high ASLR entropy in newer Linux kernels. [#67081](https://github.com/ClickHouse/ClickHouse/pull/67081) ([Raúl Marín](https://github.com/Algunenano)).
* Some parts of client code are extracted to a single file and highest possible level optimization is applied to them even for debug builds. This closes: [#65745](https://github.com/ClickHouse/ClickHouse/issues/65745). [#67215](https://github.com/ClickHouse/ClickHouse/pull/67215) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Bug Fix
* Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Fix crash of `uniq` and `uniqTheta ` with `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)).
* Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Fixed reading of subcolumns after `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)).
* Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Fix formatting of query with aliased JOIN ON expression, e.g. `... JOIN t2 ON (x = y) AS e ORDER BY x` should be formatted as `... JOIN t2 ON ((x = y) AS e) ORDER BY x`. [#66312](https://github.com/ClickHouse/ClickHouse/pull/66312) ([vdimir](https://github.com/vdimir)).
* Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible runtime error while converting Array field with nulls to Array(Variant). [#66727](https://github.com/ClickHouse/ClickHouse/pull/66727) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Fix creating KeeperMap table after an incomplete drop. [#66865](https://github.com/ClickHouse/ClickHouse/pull/66865) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix broken part error while restoring to a `s3_plain_rewritable` disk. [#66881](https://github.com/ClickHouse/ClickHouse/pull/66881) ([Vitaly Baranov](https://github.com/vitlibar)).
* In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Fix invalid format detection in schema inference that could lead to logical error Format {} doesn't support schema inference. [#66899](https://github.com/ClickHouse/ClickHouse/pull/66899) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible deadlock on query cancel with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)).
* Forbid create as select even when database_replicated_allow_heavy_create is set. It was unconditionally forbidden in 23.12 and accidentally allowed under the setting in unreleased 24.7. [#66980](https://github.com/ClickHouse/ClickHouse/pull/66980) ([vdimir](https://github.com/vdimir)).
* Reading from the `numbers` could wrongly throw an exception when the `max_rows_to_read` limit was set. This closes [#66992](https://github.com/ClickHouse/ClickHouse/issues/66992). [#66996](https://github.com/ClickHouse/ClickHouse/pull/66996) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add proper type conversion to lagInFrame and leadInFrame window functions - fixes msan test. [#67091](https://github.com/ClickHouse/ClickHouse/pull/67091) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Use a separate client context in `clickhouse-local`. [#67133](https://github.com/ClickHouse/ClickHouse/pull/67133) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Correct behavior of `ORDER BY all` with disabled `enable_order_by_all` and parallel replicas (distributed queries as well). [#67153](https://github.com/ClickHouse/ClickHouse/pull/67153) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix wrong usage of input_format_max_bytes_to_read_for_schema_inference in schema cache. [#67157](https://github.com/ClickHouse/ClickHouse/pull/67157) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix the memory leak for count distinct, when exception issued during group by single nullable key. [#67171](https://github.com/ClickHouse/ClickHouse/pull/67171) ([Jet He](https://github.com/compasses)).
* Fix an error in optimization which converts OUTER JOIN to INNER JOIN. This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix error `Conversion from AggregateFunction(name, Type) to AggregateFunction(name, Nullable(Type)) is not supported`. The bug was caused by the `optimize_rewrite_aggregate_function_with_if` optimization. Fixes [#67112](https://github.com/ClickHouse/ClickHouse/issues/67112). [#67229](https://github.com/ClickHouse/ClickHouse/pull/67229) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix hung query when using empty tuple as lhs of function IN. [#67295](https://github.com/ClickHouse/ClickHouse/pull/67295) ([Duc Canh Le](https://github.com/canhld94)).
* It was possible to create a very deep nested JSON data that triggered stack overflow while skipping unknown fields. This closes [#67292](https://github.com/ClickHouse/ClickHouse/issues/67292). [#67324](https://github.com/ClickHouse/ClickHouse/pull/67324) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix attaching ReplicatedMergeTree table after exception during startup. [#67360](https://github.com/ClickHouse/ClickHouse/pull/67360) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix segfault caused by incorrectly detaching from thread group in `Aggregator`. [#67385](https://github.com/ClickHouse/ClickHouse/pull/67385) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix one more case when a non-deterministic function is specified in PK. [#67395](https://github.com/ClickHouse/ClickHouse/pull/67395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed `bloom_filter` index breaking queries with mildly weird conditions like `(k=2)=(k=2)` or `has([1,2,3], k)`. [#67423](https://github.com/ClickHouse/ClickHouse/pull/67423) ([Michael Kolupaev](https://github.com/al13n321)).
* Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix wait for tasks in ~WriteBufferFromS3 in case WriteBuffer was cancelled. [#67459](https://github.com/ClickHouse/ClickHouse/pull/67459) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Protect temporary part directories from removing during RESTORE. [#67491](https://github.com/ClickHouse/ClickHouse/pull/67491) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `Logical error: Expected the argument №N of type T to have X rows, but it has 0`. The error could happen in a remote query with constant expression in `GROUP BY` (with a new analyzer). [#67536](https://github.com/ClickHouse/ClickHouse/pull/67536) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix join on tuple with NULLs: Some queries with the new analyzer and `NULL` inside the tuple in the `JOIN ON` section returned incorrect results. [#67538](https://github.com/ClickHouse/ClickHouse/pull/67538) ([vdimir](https://github.com/vdimir)).
* Fix redundant reschedule of FileCache::freeSpaceRatioKeepingThreadFunc() in case of full non-evictable cache. [#67540](https://github.com/ClickHouse/ClickHouse/pull/67540) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix for function `toStartOfWeek` which returned the wrong result with a small `DateTime64` value. [#67558](https://github.com/ClickHouse/ClickHouse/pull/67558) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix creation of view with recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix `Logical error: 'file_offset_of_buffer_end <= read_until_position'` in filesystem cache. Closes [#57508](https://github.com/ClickHouse/ClickHouse/issues/57508). [#67623](https://github.com/ClickHouse/ClickHouse/pull/67623) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixes [#62282](https://github.com/ClickHouse/ClickHouse/issues/62282). Removed the call to `convertFieldToString()` and added datatype specific serialization code. Parameterized view substitution was broken for multiple datatypes when parameter value was a function or expression returning datatype instance. [#67654](https://github.com/ClickHouse/ClickHouse/pull/67654) ([Shankar](https://github.com/shiyer7474)).
* Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is considered and now window functions without window frame definition in sql can be put into different `WindowTransfomer`s properly. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
* Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible logical error "Unexpected return type from if" with experimental Variant type and enabled setting `use_variant_as_common_type ` in function if with Tuples and Maps. [#67687](https://github.com/ClickHouse/ClickHouse/pull/67687) ([Kruglov Pavel](https://github.com/Avogar)).
* Due to a bug in the Linux kernel, a query can hang in `TimerDescriptor::drain`. This closes [#37686](https://github.com/ClickHouse/ClickHouse/issues/37686). [#67702](https://github.com/ClickHouse/ClickHouse/pull/67702) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix completion of `RESTORE ON CLUSTER` command. [#67720](https://github.com/ClickHouse/ClickHouse/pull/67720) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix dictionary hang in case of CANNOT_SCHEDULE_TASK while loading. [#67751](https://github.com/ClickHouse/ClickHouse/pull/67751) ([Azat Khuzhin](https://github.com/azat)).
* Queries like `SELECT count() FROM t WHERE cast(c = 1 or c = 9999 AS Bool) SETTINGS use_skip_indexes=1` with bloom filter indexes on `c` now work correctly. [#67781](https://github.com/ClickHouse/ClickHouse/pull/67781) ([jsc0218](https://github.com/jsc0218)).
* Fix wrong aggregation result in some queries with aggregation without keys and filter, close [#67419](https://github.com/ClickHouse/ClickHouse/issues/67419). [#67804](https://github.com/ClickHouse/ClickHouse/pull/67804) ([vdimir](https://github.com/vdimir)).
* Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix DateTime64 parsing after constant folding in distributed queries, close [#66773](https://github.com/ClickHouse/ClickHouse/issues/66773). [#67920](https://github.com/ClickHouse/ClickHouse/pull/67920) ([vdimir](https://github.com/vdimir)).
* Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Now ClickHouse doesn't consider part as broken if projection doesn't exist on disk but exists in `checksums.txt`. [#68003](https://github.com/ClickHouse/ClickHouse/pull/68003) ([alesapin](https://github.com/alesapin)).
* Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Attempt to fix `Block structure mismatch in AggregatingStep stream: different types` for aggregate projection optimization. [#68107](https://github.com/ClickHouse/ClickHouse/pull/68107) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).

### <a id="247"></a> ClickHouse release 24.7, 2024-07-30

#### Backward Incompatible Change
@@ -605,7 +605,9 @@ if (NATIVE_BUILD_TARGETS
 
     execute_process(
         COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )
 
     execute_process(
         COMMAND ${CMAKE_COMMAND}
@@ -617,9 +619,13 @@ if (NATIVE_BUILD_TARGETS
         "-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
         ${PROJECT_SOURCE_DIR}
         WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )
 
     execute_process(
         COMMAND ${CMAKE_COMMAND} --build "${NATIVE_BUILD_DIR}" --target ${NATIVE_BUILD_TARGETS}
-        COMMAND_ECHO STDOUT)
+        COMMAND_ECHO STDOUT
+        COMMAND_ERROR_IS_FATAL ANY
+    )
 endif ()
@@ -51,8 +51,14 @@ if (NOT "$ENV{CFLAGS}" STREQUAL ""
 endif()
 
 # Default toolchain - this is needed to avoid dependency on OS files.
-execute_process(COMMAND uname -s OUTPUT_VARIABLE OS)
-execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCH)
+execute_process(COMMAND uname -s
+    OUTPUT_VARIABLE OS
+    COMMAND_ERROR_IS_FATAL ANY
+)
+execute_process(COMMAND uname -m
+    OUTPUT_VARIABLE ARCH
+    COMMAND_ERROR_IS_FATAL ANY
+)
 
 # By default, prefer clang on Linux
 # But note, that you still may change the compiler with -DCMAKE_C_COMPILER/-DCMAKE_CXX_COMPILER.
README.md (30 lines changed)

@@ -34,17 +34,41 @@ curl https://clickhouse.com/ | sh
 
 Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
 
-* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 29
+* [v24.9 Community Call](https://clickhouse.com/company/events/v24-9-community-release-call) - September 26
 
 ## Upcoming Events
 
 Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
 
-* MORE COMING SOON!
+The following upcoming meetups are featuring creator of ClickHouse & CTO, Alexey Milovidov:
+
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12

Other upcoming meetups
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
* [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26

 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
-* **Recording available**: [**v24.4 Release Call**](https://www.youtube.com/watch?v=dtUqgcfOGmE) All the features of 24.4, one convenient video! Watch it now!
+* **Recording available**: [**v24.8 LTS Release Call**](https://www.youtube.com/watch?v=AeLmp2jc51k) All the features of 24.8 LTS, one convenient video! Watch it now!
 
 ## Interested in joining ClickHouse and making it your full-time job?
SECURITY.md (16 lines changed)

@@ -14,25 +14,15 @@ The following versions of ClickHouse server are currently supported with security
 
 | Version | Supported |
 |:-|:-|
+| 24.8 | ✔️ |
 | 24.7 | ✔️ |
 | 24.6 | ✔️ |
-| 24.5 | ✔️ |
+| 24.5 | ❌ |
 | 24.4 | ❌ |
 | 24.3 | ✔️ |
 | 24.2 | ❌ |
 | 24.1 | ❌ |
-| 23.12 | ❌ |
-| 23.11 | ❌ |
-| 23.10 | ❌ |
-| 23.9 | ❌ |
-| 23.8 | ✔️ |
-| 23.7 | ❌ |
-| 23.6 | ❌ |
-| 23.5 | ❌ |
-| 23.4 | ❌ |
-| 23.3 | ❌ |
-| 23.2 | ❌ |
-| 23.1 | ❌ |
+| 23.* | ❌ |
 | 22.* | ❌ |
 | 21.* | ❌ |
 | 20.* | ❌ |
@@ -18,7 +18,9 @@
 #define Net_HTTPResponse_INCLUDED
 
+
+#include <map>
 #include <vector>
 
 #include "Poco/Net/HTTPCookie.h"
 #include "Poco/Net/HTTPMessage.h"
 #include "Poco/Net/Net.h"
@@ -180,6 +182,8 @@ namespace Net
 		/// May throw an exception in case of a malformed
 		/// Set-Cookie header.
 
+		void getHeaders(std::map<std::string, std::string> & headers) const;
+
 		void write(std::ostream & ostr) const;
 		/// Writes the HTTP response to the given
 		/// output stream.
@@ -209,6 +209,15 @@ void HTTPResponse::getCookies(std::vector<HTTPCookie>& cookies) const
 }
 }


+void HTTPResponse::getHeaders(std::map<std::string, std::string> & headers) const
+{
+    headers.clear();
+    for (const auto & it : *this)
+    {
+        headers.emplace(it.first, it.second);
+    }
+}
+
 void HTTPResponse::write(std::ostream& ostr) const
 {
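To make the new accessor concrete, here is a small, hedged usage sketch that is not part of the commit: it assumes the vendored Poco headers are on the include path, and the header names and values are invented purely for the demo.

```cpp
#include <iostream>
#include <map>
#include <string>

#include "Poco/Net/HTTPResponse.h"

int main()
{
    Poco::Net::HTTPResponse response(Poco::Net::HTTPResponse::HTTP_OK);
    response.set("Content-Type", "text/plain"); // setter inherited from MessageHeader
    response.set("X-Example-Header", "42");     // hypothetical header, demo only

    std::map<std::string, std::string> headers;
    response.getHeaders(headers);               // accessor added by the hunk above

    for (const auto & [name, value] : headers)
        std::cout << name << ": " << value << "\n";
    return 0;
}
```

Because the result is a `std::map` filled with `emplace`, repeated header names keep only their first occurrence, unlike the underlying `NameValueCollection`.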
@@ -311,6 +311,14 @@ int SecureSocketImpl::sendBytes(const void* buffer, int length, int flags)
 while (mustRetry(rc, remaining_time));
 if (rc <= 0)
 {
+    // At this stage we still can have last not yet received SSL message containing SSL error
+    // so make a read to force SSL to process possible SSL error
+    if (SSL_get_error(_pSSL, rc) == SSL_ERROR_SYSCALL && SocketImpl::lastError() == POCO_ECONNRESET)
+    {
+        char c = 0;
+        SSL_read(_pSSL, &c, 1);
+    }
+
     rc = handleError(rc);
     if (rc == 0) throw SSLConnectionUnexpectedlyClosedException();
 }
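The same idea outside of Poco, as a hedged standalone sketch against plain OpenSSL (the function name is invented, and `errno` stands in for `SocketImpl::lastError()`): when a send fails because the peer reset the connection, a one-byte `SSL_read` gives OpenSSL a chance to process any final TLS record so the real SSL error can be reported instead of a generic syscall failure.

```cpp
#include <cerrno>

#include <openssl/err.h>
#include <openssl/ssl.h>

// Hedged sketch, not the Poco implementation: send, and on a failure caused by a
// connection reset, drain one byte so OpenSSL can surface a pending SSL error.
int sendWithResetDrain(SSL * ssl, const void * buffer, int length)
{
    int rc = SSL_write(ssl, buffer, length);
    if (rc <= 0)
    {
        const int ssl_error = SSL_get_error(ssl, rc);
        if (ssl_error == SSL_ERROR_SYSCALL && errno == ECONNRESET)
        {
            char c = 0;
            SSL_read(ssl, &c, 1); // force processing of the last received SSL message, if any
        }
    }
    return rc;
}
```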
@@ -9,10 +9,18 @@ endif ()
 file(GLOB bprefix "/usr/local/llvm${COMPILER_VERSION_MAJOR}/lib/clang/${COMPILER_VERSION_MAJOR}/lib/${system_processor}-portbld-freebsd*/")
 message(STATUS "-Bprefix: ${bprefix}")

-execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
+execute_process(COMMAND
+    ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a
+    OUTPUT_VARIABLE BUILTINS_LIBRARY
+    COMMAND_ERROR_IS_FATAL ANY
+    OUTPUT_STRIP_TRAILING_WHITESPACE)
 # --print-file-name simply prints what you passed in case of nothing was resolved, so let's try one other possible option
 if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins-${system_processor}.a")
-    execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
+    execute_process(COMMAND
+        ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a
+        OUTPUT_VARIABLE BUILTINS_LIBRARY
+        COMMAND_ERROR_IS_FATAL ANY
+        OUTPUT_STRIP_TRAILING_WHITESPACE)
 endif()
 if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins.a")
     message(FATAL_ERROR "libclang_rt.builtins had not been found")
@@ -8,4 +8,7 @@ set (CMAKE_CXX_COMPILER_TARGET "x86_64-pc-freebsd11")
 set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd11")
 set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-x86_64")

+# dprintf is used in a patched version of replxx
+add_compile_definitions(_WITH_DPRINTF)
+
 set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
@@ -5,7 +5,11 @@ set (DEFAULT_LIBS "-nodefaultlibs")

 # We need builtins from Clang's RT even without libcxx - for ubsan+int128.
 # See https://bugs.llvm.org/show_bug.cgi?id=16404
-execute_process (COMMAND ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
+execute_process (COMMAND
+    ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt
+    OUTPUT_VARIABLE BUILTINS_LIBRARY
+    COMMAND_ERROR_IS_FATAL ANY
+    OUTPUT_STRIP_TRAILING_WHITESPACE)

 # Apparently, in clang-19, the UBSan support library for C++ was moved out into ubsan_standalone_cxx.a, so we have to include both.
 if (SANITIZE STREQUAL undefined)
@@ -5,7 +5,11 @@ if (NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang")
 endif ()

 # Print details to output
-execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version OUTPUT_VARIABLE COMPILER_SELF_IDENTIFICATION OUTPUT_STRIP_TRAILING_WHITESPACE)
+execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
+    OUTPUT_VARIABLE COMPILER_SELF_IDENTIFICATION
+    COMMAND_ERROR_IS_FATAL ANY
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+)
 message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}")

 # Require minimum compiler versions
@@ -90,7 +90,10 @@ endfunction()

 # Function get_cmake_properties returns list of all propreties that cmake supports
 function(get_cmake_properties outvar)
-    execute_process(COMMAND cmake --help-property-list OUTPUT_VARIABLE cmake_properties)
+    execute_process(COMMAND cmake --help-property-list
+        OUTPUT_VARIABLE cmake_properties
+        COMMAND_ERROR_IS_FATAL ANY
+    )
     # Convert command output into a CMake list
     string(REGEX REPLACE ";" "\\\\;" cmake_properties "${cmake_properties}")
     string(REGEX REPLACE "\n" ";" cmake_properties "${cmake_properties}")
2  contrib/aws  vendored

@@ -1 +1 @@
-Subproject commit 1c2946bfcb7f1e3ae0a858de0b59d4f1a7b4ccaf
+Subproject commit d5450d76abda556ce145ddabe7e0cc6a7644ec59

2  contrib/aws-crt-cpp  vendored

@@ -1 +1 @@
-Subproject commit f532d6abc0d2b0d8b5d6fe9e7c51eaedbe4afbd0
+Subproject commit e5aa45cacfdcda7719ead38760e7c61076f5745f
@@ -37,7 +37,9 @@ message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}")
 execute_process(COMMAND
     bash -c "cd ${TZDIR} && find * -type f -and ! -name '*.tab' -and ! -name 'localtime' | LC_ALL=C sort | paste -sd ';' -"
     OUTPUT_STRIP_TRAILING_WHITESPACE
-    OUTPUT_VARIABLE TIMEZONES)
+    OUTPUT_VARIABLE TIMEZONES
+    COMMAND_ERROR_IS_FATAL ANY
+)

 file(APPEND ${TIMEZONES_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")
 file(APPEND ${TIMEZONES_FILE} "#include <incbin.h>\n")
@@ -359,7 +359,9 @@ else ()

 execute_process(
     COMMAND mkdir -p ${PROTOC_BUILD_DIR}
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)

 execute_process(
     COMMAND ${CMAKE_COMMAND}
@@ -375,11 +377,15 @@ else ()
     "-DABSL_ENABLE_INSTALL=0"
     "${protobuf_source_dir}"
     WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)

 execute_process(
     COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)
 endif ()

 add_executable(protoc IMPORTED GLOBAL)
@@ -53,6 +53,7 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
 execute_process(
     COMMAND mkdir -p ${OPENSSL_BUILD_DIR}
     COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
 )

 if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
@@ -89,15 +90,21 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
     "-DClickHouse_SOURCE_DIR=${ClickHouse_SOURCE_DIR}"
     "${OPENSSL_SOURCE_DIR}"
     WORKING_DIRECTORY "${OPENSSL_BUILD_DIR}"
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)

 execute_process(
     COMMAND ${CMAKE_COMMAND} --build "${OPENSSL_BUILD_DIR}"
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)

 execute_process(
     COMMAND ${CMAKE_COMMAND} --install "${OPENSSL_BUILD_DIR}"
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)

 # It's not important on which file we depend, we just want to specify right order
 add_library(openssl_for_grpc STATIC IMPORTED GLOBAL)
@@ -110,6 +117,7 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
 execute_process(
     COMMAND mkdir -p ${GRPC_CPP_PLUGIN_BUILD_DIR}
     COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
 )

 set(abseil_source_dir "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
@@ -140,11 +148,15 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
     "-DgRPC_SSL_PROVIDER=package"
     "${_gRPC_SOURCE_DIR}"
     WORKING_DIRECTORY "${GRPC_CPP_PLUGIN_BUILD_DIR}"
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)

 execute_process(
     COMMAND ${CMAKE_COMMAND} --build "${GRPC_CPP_PLUGIN_BUILD_DIR}"
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)

 add_executable(grpc_cpp_plugin IMPORTED GLOBAL)
 set_target_properties (grpc_cpp_plugin PROPERTIES IMPORTED_LOCATION "${GRPC_CPP_PLUGIN_BUILD_DIR}/grpc_cpp_plugin")
2  contrib/icu  vendored

@@ -1 +1 @@
-Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625
+Subproject commit 4216173eeeb39c1d4caaa54a68860e800412d273

2  contrib/libfiu  vendored

@@ -1 +1 @@
-Subproject commit b85edbde4cf974b1b40d27828a56f0505f4e2ee5
+Subproject commit a1290d8cd3d7b4541d6c976e0a54f572ac03f2a3

2  contrib/replxx  vendored

@@ -1 +1 @@
-Subproject commit 5d04501f93a4fb7f0bb8b73b8f614bc986f9e25b
+Subproject commit 711c18e7f4d951255aa8b0851e5a55d5a5fb0ddb

2  contrib/usearch  vendored

@@ -1 +1 @@
-Subproject commit e21a5778a0d4469ddaf38c94b7be0196bb701ee4
+Subproject commit 7a8967cb442b08ca20c3dd781414378e65957d37
@@ -47,8 +47,7 @@
     "docker/test/stateful": {
         "name": "clickhouse/stateful-test",
         "dependent": [
-            "docker/test/stress",
-            "docker/test/upgrade"
+            "docker/test/stress"
         ]
     },
     "docker/test/unit": {
@@ -59,10 +58,6 @@
         "name": "clickhouse/stress-test",
         "dependent": []
     },
-    "docker/test/upgrade": {
-        "name": "clickhouse/upgrade-check",
-        "dependent": []
-    },
     "docker/test/integration/runner": {
         "name": "clickhouse/integration-tests-runner",
         "dependent": []
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.7.3.42"
+ARG VERSION="24.8.2.3"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.7.3.42"
+ARG VERSION="24.8.2.3"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.7.3.42"
+ARG VERSION="24.8.2.3"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 #docker-official-library:off
@@ -93,6 +93,3 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 ENV COMMIT_SHA=''
 ENV PULL_REQUEST_NUMBER=''
 ENV COPY_CLICKHOUSE_BINARY_TO_OUTPUT=0
-
-COPY run.sh /
-CMD ["/bin/bash", "/run.sh"]
@@ -112,3 +112,5 @@ wadllib==1.3.6
 websocket-client==0.59.0
 wheel==0.37.1
 zipp==1.0.0
+
+deltalake==0.16.0
@@ -13,7 +13,8 @@ entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
 # https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
 # Double-escaped backslashes are a tribute to the engineering wonder of docker --
 # it gives '/bin/sh: 1: [bash,: not found' otherwise.
-numactl --hardware
+echo > compare.log
+numactl --hardware | tee -a compare.log
 node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
-echo Will bind to NUMA node $node;
+echo Will bind to NUMA node $node | tee -a compare.log
 numactl --cpunodebind=$node --membind=$node $entry
@@ -35,12 +35,8 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \


 ENV TZ=Europe/Amsterdam
-ENV MAX_RUN_TIME=9000
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

 ARG sqllogic_test_repo="https://github.com/gregrahn/sqllogictest.git"

 RUN git clone --recursive ${sqllogic_test_repo}
-
-COPY run.sh /
-CMD ["/bin/bash", "/run.sh"]
@@ -22,7 +22,6 @@ ARG sqltest_repo="https://github.com/elliotchance/sqltest/"
 RUN git clone ${sqltest_repo}

 ENV TZ=UTC
-ENV MAX_RUN_TIME=900
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

 COPY run.sh /
@@ -10,7 +10,3 @@ RUN apt-get update -y \
     npm \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
-
-COPY create.sql /
-COPY run.sh /
-CMD ["/bin/bash", "/run.sh"]
@@ -1 +0,0 @@
-../stateless/setup_minio.sh
@@ -65,7 +65,6 @@ ENV TZ=Europe/Amsterdam
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

 ENV NUM_TRIES=1
-ENV MAX_RUN_TIME=0

 # Unrelated to vars in setup_minio.sh, but should be the same there
 # to have the same binaries for local running scenario
@@ -86,18 +85,6 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo
 ENV MINIO_ROOT_USER="clickhouse"
 ENV MINIO_ROOT_PASSWORD="clickhouse"
 ENV EXPORT_S3_STORAGE_POLICIES=1
-ENV CLICKHOUSE_GRPC_CLIENT="/usr/share/clickhouse-utils/grpc-client/clickhouse-grpc-client.py"

 RUN npm install -g azurite@3.30.0 \
     && npm install -g tslib && npm install -g node
-
-COPY run.sh /
-COPY setup_minio.sh /
-COPY setup_hdfs_minicluster.sh /
-COPY attach_gdb.lib /
-COPY utils.lib /
-
-# We store stress_tests.lib in stateless image to avoid duplication of this file in stress and upgrade tests
-COPY stress_tests.lib /
-
-CMD ["/bin/bash", "/run.sh"]
@@ -22,8 +22,5 @@ RUN apt-get update -y \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

-COPY run.sh /
-
 ENV EXPORT_S3_STORAGE_POLICIES=1

-CMD ["/bin/bash", "/run.sh"]
@@ -1,29 +0,0 @@
-# rebuild in #33610
-# docker build -t clickhouse/upgrade-check .
-ARG FROM_TAG=latest
-FROM clickhouse/stateful-test:$FROM_TAG
-
-RUN apt-get update -y \
-    && env DEBIAN_FRONTEND=noninteractive \
-        apt-get install --yes --no-install-recommends \
-            bash \
-            tzdata \
-            parallel \
-            expect \
-            python3 \
-            python3-lxml \
-            python3-termcolor \
-            python3-requests \
-            curl \
-            sudo \
-            openssl \
-            netcat-openbsd \
-            brotli \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
-
-COPY run.sh /
-
-ENV EXPORT_S3_STORAGE_POLICIES=1
-
-CMD ["/bin/bash", "/run.sh"]
@@ -56,7 +56,5 @@ RUN apt-get update \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

-COPY process_functional_tests_result.py /
-
 COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
 ENV PATH="/opt/gdb/bin:${PATH}"
16  docs/changelogs/v24.3.8.13-lts.md  Normal file

@@ -0,0 +1,16 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.3.8.13-lts (84bbfc70f5d) FIXME as compared to v24.3.7.30-lts (c8a28cf4331)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#68562](https://github.com/ClickHouse/ClickHouse/issues/68562): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
+* Backported in [#68114](https://github.com/ClickHouse/ClickHouse/issues/68114): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#67989](https://github.com/ClickHouse/ClickHouse/issues/67989): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#68335](https://github.com/ClickHouse/ClickHouse/issues/68335): Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#68392](https://github.com/ClickHouse/ClickHouse/issues/68392): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
71  docs/changelogs/v24.5.5.41-stable.md  Normal file

@@ -0,0 +1,71 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.5.5.41-stable (441d4a6ebe3) FIXME as compared to v24.5.4.49-stable (63b760955a0)
+
+#### Improvement
+* Backported in [#66768](https://github.com/ClickHouse/ClickHouse/issues/66768): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#65350](https://github.com/ClickHouse/ClickHouse/issues/65350): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#65621](https://github.com/ClickHouse/ClickHouse/issues/65621): Fix `Cannot find column` in distributed query with `ARRAY JOIN` by `Nested` column. Fixes [#64755](https://github.com/ClickHouse/ClickHouse/issues/64755). [#64801](https://github.com/ClickHouse/ClickHouse/pull/64801) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#67902](https://github.com/ClickHouse/ClickHouse/issues/67902): Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66884](https://github.com/ClickHouse/ClickHouse/issues/66884): Fix unexpeced size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#65933](https://github.com/ClickHouse/ClickHouse/issues/65933): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
+* Backported in [#66301](https://github.com/ClickHouse/ClickHouse/issues/66301): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
+* Backported in [#66328](https://github.com/ClickHouse/ClickHouse/issues/66328): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#68252](https://github.com/ClickHouse/ClickHouse/issues/68252): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
+* Backported in [#66155](https://github.com/ClickHouse/ClickHouse/issues/66155): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
+* Backported in [#66454](https://github.com/ClickHouse/ClickHouse/issues/66454): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Backported in [#66226](https://github.com/ClickHouse/ClickHouse/issues/66226): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#66680](https://github.com/ClickHouse/ClickHouse/issues/66680): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Backported in [#66604](https://github.com/ClickHouse/ClickHouse/issues/66604): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
+* Backported in [#66360](https://github.com/ClickHouse/ClickHouse/issues/66360): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#68064](https://github.com/ClickHouse/ClickHouse/issues/68064): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
+* Backported in [#68158](https://github.com/ClickHouse/ClickHouse/issues/68158): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
+* Backported in [#66972](https://github.com/ClickHouse/ClickHouse/issues/66972): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66691](https://github.com/ClickHouse/ClickHouse/issues/66691): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Backported in [#66969](https://github.com/ClickHouse/ClickHouse/issues/66969): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66720](https://github.com/ClickHouse/ClickHouse/issues/66720): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#66951](https://github.com/ClickHouse/ClickHouse/issues/66951): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66757](https://github.com/ClickHouse/ClickHouse/issues/66757): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL).` The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66948](https://github.com/ClickHouse/ClickHouse/issues/66948): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#68115](https://github.com/ClickHouse/ClickHouse/issues/68115): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#67633](https://github.com/ClickHouse/ClickHouse/issues/67633): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
+* Backported in [#67481](https://github.com/ClickHouse/ClickHouse/issues/67481): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
+* Backported in [#67814](https://github.com/ClickHouse/ClickHouse/issues/67814): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#67197](https://github.com/ClickHouse/ClickHouse/issues/67197): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Backported in [#67379](https://github.com/ClickHouse/ClickHouse/issues/67379): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#67501](https://github.com/ClickHouse/ClickHouse/issues/67501): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
+* Backported in [#67886](https://github.com/ClickHouse/ClickHouse/issues/67886): Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#67576](https://github.com/ClickHouse/ClickHouse/issues/67576): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#67850](https://github.com/ClickHouse/ClickHouse/issues/67850): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
+* Backported in [#68272](https://github.com/ClickHouse/ClickHouse/issues/68272): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Backported in [#67807](https://github.com/ClickHouse/ClickHouse/issues/67807): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#67836](https://github.com/ClickHouse/ClickHouse/issues/67836): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Backported in [#67991](https://github.com/ClickHouse/ClickHouse/issues/67991): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#68207](https://github.com/ClickHouse/ClickHouse/issues/68207): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Backported in [#68091](https://github.com/ClickHouse/ClickHouse/issues/68091): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
+* Backported in [#68122](https://github.com/ClickHouse/ClickHouse/issues/68122): Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
+* Backported in [#68171](https://github.com/ClickHouse/ClickHouse/issues/68171): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
+* Backported in [#68337](https://github.com/ClickHouse/ClickHouse/issues/68337): Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#68667](https://github.com/ClickHouse/ClickHouse/issues/68667): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Backported in [#66387](https://github.com/ClickHouse/ClickHouse/issues/66387): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
+* Backported in [#66426](https://github.com/ClickHouse/ClickHouse/issues/66426): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66544](https://github.com/ClickHouse/ClickHouse/issues/66544): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#66859](https://github.com/ClickHouse/ClickHouse/issues/66859): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
+* Backported in [#66875](https://github.com/ClickHouse/ClickHouse/issues/66875): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
+* Backported in [#67059](https://github.com/ClickHouse/ClickHouse/issues/67059): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
+* Backported in [#66945](https://github.com/ClickHouse/ClickHouse/issues/66945): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#67252](https://github.com/ClickHouse/ClickHouse/issues/67252): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67412](https://github.com/ClickHouse/ClickHouse/issues/67412): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
+* Update version after release. [#67862](https://github.com/ClickHouse/ClickHouse/pull/67862) ([robot-clickhouse](https://github.com/robot-clickhouse)).
+* Backported in [#68077](https://github.com/ClickHouse/ClickHouse/issues/68077): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
33  docs/changelogs/v24.5.6.45-stable.md  Normal file

@@ -0,0 +1,33 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.5.6.45-stable (bdca8604c29) FIXME as compared to v24.5.5.78-stable (0138248cb62)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#67902](https://github.com/ClickHouse/ClickHouse/issues/67902): Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#68252](https://github.com/ClickHouse/ClickHouse/issues/68252): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
+* Backported in [#68064](https://github.com/ClickHouse/ClickHouse/issues/68064): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
+* Backported in [#68158](https://github.com/ClickHouse/ClickHouse/issues/68158): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
+* Backported in [#68115](https://github.com/ClickHouse/ClickHouse/issues/68115): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#67886](https://github.com/ClickHouse/ClickHouse/issues/67886): Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#68272](https://github.com/ClickHouse/ClickHouse/issues/68272): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Backported in [#67807](https://github.com/ClickHouse/ClickHouse/issues/67807): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#67836](https://github.com/ClickHouse/ClickHouse/issues/67836): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Backported in [#67991](https://github.com/ClickHouse/ClickHouse/issues/67991): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#68207](https://github.com/ClickHouse/ClickHouse/issues/68207): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Backported in [#68091](https://github.com/ClickHouse/ClickHouse/issues/68091): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
+* Backported in [#68122](https://github.com/ClickHouse/ClickHouse/issues/68122): Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
+* Backported in [#68171](https://github.com/ClickHouse/ClickHouse/issues/68171): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
+* Backported in [#68337](https://github.com/ClickHouse/ClickHouse/issues/68337): Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#68667](https://github.com/ClickHouse/ClickHouse/issues/68667): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Update version after release. [#67862](https://github.com/ClickHouse/ClickHouse/pull/67862) ([robot-clickhouse](https://github.com/robot-clickhouse)).
+* Backported in [#68077](https://github.com/ClickHouse/ClickHouse/issues/68077): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
+* Backported in [#68756](https://github.com/ClickHouse/ClickHouse/issues/68756): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
83
docs/changelogs/v24.6.3.38-stable.md
Normal file
83
docs/changelogs/v24.6.3.38-stable.md
Normal file
@ -0,0 +1,83 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2024
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2024 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v24.6.3.38-stable (4e33c831589) FIXME as compared to v24.6.2.17-stable (5710a8b5c0c)
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Backported in [#66770](https://github.com/ClickHouse/ClickHouse/issues/66770): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
* Backported in [#66885](https://github.com/ClickHouse/ClickHouse/issues/66885): Fix unexpeced size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#66303](https://github.com/ClickHouse/ClickHouse/issues/66303): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Backported in [#66330](https://github.com/ClickHouse/ClickHouse/issues/66330): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Backported in [#66157](https://github.com/ClickHouse/ClickHouse/issues/66157): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Backported in [#66210](https://github.com/ClickHouse/ClickHouse/issues/66210): Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if optimization merges two filter expressions and does not apply a short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#66456](https://github.com/ClickHouse/ClickHouse/issues/66456): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Backported in [#66228](https://github.com/ClickHouse/ClickHouse/issues/66228): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#66183](https://github.com/ClickHouse/ClickHouse/issues/66183): Fix rare case with missing data in the result of distributed query, close [#61432](https://github.com/ClickHouse/ClickHouse/issues/61432). [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
* Backported in [#66271](https://github.com/ClickHouse/ClickHouse/issues/66271): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66682](https://github.com/ClickHouse/ClickHouse/issues/66682): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66587](https://github.com/ClickHouse/ClickHouse/issues/66587): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66362](https://github.com/ClickHouse/ClickHouse/issues/66362): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68066](https://github.com/ClickHouse/ClickHouse/issues/68066): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68566](https://github.com/ClickHouse/ClickHouse/issues/68566): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68159](https://github.com/ClickHouse/ClickHouse/issues/68159): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#66613](https://github.com/ClickHouse/ClickHouse/issues/66613): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66693](https://github.com/ClickHouse/ClickHouse/issues/66693): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#66577](https://github.com/ClickHouse/ClickHouse/issues/66577): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66721](https://github.com/ClickHouse/ClickHouse/issues/66721): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66670](https://github.com/ClickHouse/ClickHouse/issues/66670): Fix reading of uninitialized memory when hashing empty tuples. This closes [#66559](https://github.com/ClickHouse/ClickHouse/issues/66559). [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#66952](https://github.com/ClickHouse/ClickHouse/issues/66952): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66956](https://github.com/ClickHouse/ClickHouse/issues/66956): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66716](https://github.com/ClickHouse/ClickHouse/issues/66716): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66759](https://github.com/ClickHouse/ClickHouse/issues/66759): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66751](https://github.com/ClickHouse/ClickHouse/issues/66751): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68116](https://github.com/ClickHouse/ClickHouse/issues/68116): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67635](https://github.com/ClickHouse/ClickHouse/issues/67635): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67482](https://github.com/ClickHouse/ClickHouse/issues/67482): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#67816](https://github.com/ClickHouse/ClickHouse/issues/67816): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67199](https://github.com/ClickHouse/ClickHouse/issues/67199): TRUNCATE DATABASE used to stop replication as if it were a DROP DATABASE query; this is now fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67381](https://github.com/ClickHouse/ClickHouse/issues/67381): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67244](https://github.com/ClickHouse/ClickHouse/issues/67244): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#67503](https://github.com/ClickHouse/ClickHouse/issues/67503): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67887](https://github.com/ClickHouse/ClickHouse/issues/67887): Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67578](https://github.com/ClickHouse/ClickHouse/issues/67578): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68611](https://github.com/ClickHouse/ClickHouse/issues/68611): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#67852](https://github.com/ClickHouse/ClickHouse/issues/67852): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68275](https://github.com/ClickHouse/ClickHouse/issues/68275): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67808](https://github.com/ClickHouse/ClickHouse/issues/67808): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67838](https://github.com/ClickHouse/ClickHouse/issues/67838): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)). (See the usage sketch after this list.)
* Backported in [#67993](https://github.com/ClickHouse/ClickHouse/issues/67993): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68208](https://github.com/ClickHouse/ClickHouse/issues/68208): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68093](https://github.com/ClickHouse/ClickHouse/issues/68093): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68124](https://github.com/ClickHouse/ClickHouse/issues/68124): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68221](https://github.com/ClickHouse/ClickHouse/issues/68221): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68173](https://github.com/ClickHouse/ClickHouse/issues/68173): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68339](https://github.com/ClickHouse/ClickHouse/issues/68339): Fix a possible PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68396](https://github.com/ClickHouse/ClickHouse/issues/68396): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68668](https://github.com/ClickHouse/ClickHouse/issues/68668): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
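The `JSONMergePatch` entry above refers to the renamed function; a minimal usage sketch follows, assuming standard JSON Merge Patch semantics (the literal values are illustrative only):

```sql
-- Keys from the second object win on conflict; the result is a JSON string.
SELECT JSONMergePatch('{"a": 1, "b": 2}', '{"b": 20, "c": 3}') AS merged;
-- Expected: {"a":1,"b":20,"c":3}
```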

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Backport [#66599](https://github.com/ClickHouse/ClickHouse/issues/66599) to 24.6: Fix dropping named collection in local storage"'. [#66922](https://github.com/ClickHouse/ClickHouse/pull/66922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#66332](https://github.com/ClickHouse/ClickHouse/issues/66332): Do not raise a NOT_IMPLEMENTED error when getting s3 metrics with a multiple disk configuration. [#65403](https://github.com/ClickHouse/ClickHouse/pull/65403) ([Elena Torró](https://github.com/elenatorro)).
* Backported in [#66142](https://github.com/ClickHouse/ClickHouse/issues/66142): Fix flaky test_storage_s3_queue tests. [#66009](https://github.com/ClickHouse/ClickHouse/pull/66009) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#66389](https://github.com/ClickHouse/ClickHouse/issues/66389): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66428](https://github.com/ClickHouse/ClickHouse/issues/66428): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66546](https://github.com/ClickHouse/ClickHouse/issues/66546): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66861](https://github.com/ClickHouse/ClickHouse/issues/66861): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66877](https://github.com/ClickHouse/ClickHouse/issues/66877): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67061](https://github.com/ClickHouse/ClickHouse/issues/67061): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66940](https://github.com/ClickHouse/ClickHouse/issues/66940): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67254](https://github.com/ClickHouse/ClickHouse/issues/67254): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67414](https://github.com/ClickHouse/ClickHouse/issues/67414): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
* Update version after release. [#67909](https://github.com/ClickHouse/ClickHouse/pull/67909) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68079](https://github.com/ClickHouse/ClickHouse/issues/68079): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).

33
docs/changelogs/v24.6.4.42-stable.md
Normal file
@ -0,0 +1,33 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.4.42-stable (c534bb4b4dd) FIXME as compared to v24.6.3.95-stable (8325c920d11)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68066](https://github.com/ClickHouse/ClickHouse/issues/68066): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68566](https://github.com/ClickHouse/ClickHouse/issues/68566): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68159](https://github.com/ClickHouse/ClickHouse/issues/68159): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68116](https://github.com/ClickHouse/ClickHouse/issues/68116): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67887](https://github.com/ClickHouse/ClickHouse/issues/67887): Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#68611](https://github.com/ClickHouse/ClickHouse/issues/68611): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68275](https://github.com/ClickHouse/ClickHouse/issues/68275): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67993](https://github.com/ClickHouse/ClickHouse/issues/67993): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68208](https://github.com/ClickHouse/ClickHouse/issues/68208): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68093](https://github.com/ClickHouse/ClickHouse/issues/68093): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68124](https://github.com/ClickHouse/ClickHouse/issues/68124): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68221](https://github.com/ClickHouse/ClickHouse/issues/68221): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68173](https://github.com/ClickHouse/ClickHouse/issues/68173): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68339](https://github.com/ClickHouse/ClickHouse/issues/68339): Fix a possible PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68396](https://github.com/ClickHouse/ClickHouse/issues/68396): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68668](https://github.com/ClickHouse/ClickHouse/issues/68668): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Update version after release. [#67909](https://github.com/ClickHouse/ClickHouse/pull/67909) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68079](https://github.com/ClickHouse/ClickHouse/issues/68079): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Backported in [#68758](https://github.com/ClickHouse/ClickHouse/issues/68758): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).

55
docs/changelogs/v24.7.3.47-stable.md
Normal file
@ -0,0 +1,55 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.7.3.47-stable (2e50fe27a14) FIXME as compared to v24.7.2.13-stable (6e41f601b2f)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68232](https://github.com/ClickHouse/ClickHouse/issues/68232): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#67969](https://github.com/ClickHouse/ClickHouse/issues/67969): Fixed reading of subcolumns after `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68068](https://github.com/ClickHouse/ClickHouse/issues/68068): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#67637](https://github.com/ClickHouse/ClickHouse/issues/67637): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67820](https://github.com/ClickHouse/ClickHouse/issues/67820): Fix possible deadlock on query cancel with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67818](https://github.com/ClickHouse/ClickHouse/issues/67818): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67766](https://github.com/ClickHouse/ClickHouse/issues/67766): Fix crash of `uniq` and `uniqTheta` with `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)).
* Backported in [#67881](https://github.com/ClickHouse/ClickHouse/issues/67881): Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#68613](https://github.com/ClickHouse/ClickHouse/issues/68613): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#67854](https://github.com/ClickHouse/ClickHouse/issues/67854): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68278](https://github.com/ClickHouse/ClickHouse/issues/68278): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68040](https://github.com/ClickHouse/ClickHouse/issues/68040): Fix creation of view with recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#68038](https://github.com/ClickHouse/ClickHouse/issues/68038): Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is now taken into account, so window functions without a window frame definition in SQL are placed into the proper `WindowTransformer`s. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
* Backported in [#67713](https://github.com/ClickHouse/ClickHouse/issues/67713): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67840](https://github.com/ClickHouse/ClickHouse/issues/67840): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#67995](https://github.com/ClickHouse/ClickHouse/issues/67995): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68224](https://github.com/ClickHouse/ClickHouse/issues/68224): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68095](https://github.com/ClickHouse/ClickHouse/issues/68095): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68126](https://github.com/ClickHouse/ClickHouse/issues/68126): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68223](https://github.com/ClickHouse/ClickHouse/issues/68223): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68175](https://github.com/ClickHouse/ClickHouse/issues/68175): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68341](https://github.com/ClickHouse/ClickHouse/issues/68341): Fix a possible PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68398](https://github.com/ClickHouse/ClickHouse/issues/68398): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68669](https://github.com/ClickHouse/ClickHouse/issues/68669): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#67518](https://github.com/ClickHouse/ClickHouse/issues/67518): Split slow test 03036_dynamic_read_subcolumns. [#66954](https://github.com/ClickHouse/ClickHouse/pull/66954) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67516](https://github.com/ClickHouse/ClickHouse/issues/67516): Split 01508_partition_pruning_long. [#66983](https://github.com/ClickHouse/ClickHouse/pull/66983) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67529](https://github.com/ClickHouse/ClickHouse/issues/67529): Reduce max time of 00763_long_lock_buffer_alter_destination_table. [#67185](https://github.com/ClickHouse/ClickHouse/pull/67185) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#67803](https://github.com/ClickHouse/ClickHouse/issues/67803): Disable some Dynamic tests under sanitizers, rewrite 03202_dynamic_null_map_subcolumn to SQL. [#67359](https://github.com/ClickHouse/ClickHouse/pull/67359) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67643](https://github.com/ClickHouse/ClickHouse/issues/67643): [Green CI] Fix potentially flaky test_mask_sensitive_info integration test. [#67506](https://github.com/ClickHouse/ClickHouse/pull/67506) ([Alexey Katsman](https://github.com/alexkats)).
* Backported in [#67609](https://github.com/ClickHouse/ClickHouse/issues/67609): Fix test_zookeeper_config_load_balancing after adding the xdist worker name to the instance. [#67590](https://github.com/ClickHouse/ClickHouse/pull/67590) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67871](https://github.com/ClickHouse/ClickHouse/issues/67871): Fix 02434_cancel_insert_when_client_dies. [#67600](https://github.com/ClickHouse/ClickHouse/pull/67600) ([vdimir](https://github.com/vdimir)).
* Backported in [#67704](https://github.com/ClickHouse/ClickHouse/issues/67704): Fix 02910_bad_logs_level_in_local in fast tests. [#67603](https://github.com/ClickHouse/ClickHouse/pull/67603) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#67689](https://github.com/ClickHouse/ClickHouse/issues/67689): Fix 01605_adaptive_granularity_block_borders. [#67605](https://github.com/ClickHouse/ClickHouse/pull/67605) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67827](https://github.com/ClickHouse/ClickHouse/issues/67827): Try to fix 03143_asof_join_ddb_long. [#67620](https://github.com/ClickHouse/ClickHouse/pull/67620) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67892](https://github.com/ClickHouse/ClickHouse/issues/67892): Revert "Merge pull request [#66510](https://github.com/ClickHouse/ClickHouse/issues/66510) from canhld94/fix_trivial_count_non_deterministic_func". [#67800](https://github.com/ClickHouse/ClickHouse/pull/67800) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68081](https://github.com/ClickHouse/ClickHouse/issues/68081): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Update version after release. [#68044](https://github.com/ClickHouse/ClickHouse/pull/68044) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68269](https://github.com/ClickHouse/ClickHouse/issues/68269): [Green CI] Fix test 01903_correct_block_size_prediction_with_default. [#68203](https://github.com/ClickHouse/ClickHouse/pull/68203) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#68432](https://github.com/ClickHouse/ClickHouse/issues/68432): tests: make 01600_parts_states_metrics_long better. [#68265](https://github.com/ClickHouse/ClickHouse/pull/68265) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68538](https://github.com/ClickHouse/ClickHouse/issues/68538): CI: Native build for package_aarch64. [#68457](https://github.com/ClickHouse/ClickHouse/pull/68457) ([Max K.](https://github.com/maxknv)).
* Backported in [#68555](https://github.com/ClickHouse/ClickHouse/issues/68555): CI: Minor release workflow fix. [#68536](https://github.com/ClickHouse/ClickHouse/pull/68536) ([Max K.](https://github.com/maxknv)).

36
docs/changelogs/v24.7.4.51-stable.md
Normal file
@ -0,0 +1,36 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.7.4.51-stable (70fe2f6fa52) FIXME as compared to v24.7.3.42-stable (63730bc4293)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68232](https://github.com/ClickHouse/ClickHouse/issues/68232): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68068](https://github.com/ClickHouse/ClickHouse/issues/68068): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68613](https://github.com/ClickHouse/ClickHouse/issues/68613): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68278](https://github.com/ClickHouse/ClickHouse/issues/68278): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68040](https://github.com/ClickHouse/ClickHouse/issues/68040): Fix creation of view with recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#68038](https://github.com/ClickHouse/ClickHouse/issues/68038): Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is now taken into account, so window functions without a window frame definition in SQL are placed into the proper `WindowTransformer`s. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
* Backported in [#68224](https://github.com/ClickHouse/ClickHouse/issues/68224): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68095](https://github.com/ClickHouse/ClickHouse/issues/68095): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68126](https://github.com/ClickHouse/ClickHouse/issues/68126): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68223](https://github.com/ClickHouse/ClickHouse/issues/68223): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68175](https://github.com/ClickHouse/ClickHouse/issues/68175): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68341](https://github.com/ClickHouse/ClickHouse/issues/68341): Fix a possible PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68398](https://github.com/ClickHouse/ClickHouse/issues/68398): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68669](https://github.com/ClickHouse/ClickHouse/issues/68669): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#67803](https://github.com/ClickHouse/ClickHouse/issues/67803): Disable some Dynamic tests under sanitizers, rewrite 03202_dynamic_null_map_subcolumn to SQL. [#67359](https://github.com/ClickHouse/ClickHouse/pull/67359) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68081](https://github.com/ClickHouse/ClickHouse/issues/68081): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Update version after release. [#68044](https://github.com/ClickHouse/ClickHouse/pull/68044) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68269](https://github.com/ClickHouse/ClickHouse/issues/68269): [Green CI] Fix test 01903_correct_block_size_prediction_with_default. [#68203](https://github.com/ClickHouse/ClickHouse/pull/68203) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#68432](https://github.com/ClickHouse/ClickHouse/issues/68432): tests: make 01600_parts_states_metrics_long better. [#68265](https://github.com/ClickHouse/ClickHouse/pull/68265) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68538](https://github.com/ClickHouse/ClickHouse/issues/68538): CI: Native build for package_aarch64. [#68457](https://github.com/ClickHouse/ClickHouse/pull/68457) ([Max K.](https://github.com/maxknv)).
* Backported in [#68555](https://github.com/ClickHouse/ClickHouse/issues/68555): CI: Minor release workflow fix. [#68536](https://github.com/ClickHouse/ClickHouse/pull/68536) ([Max K.](https://github.com/maxknv)).
* Backported in [#68760](https://github.com/ClickHouse/ClickHouse/issues/68760): To make patch release possible from every commit on release branch, package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).

525
docs/changelogs/v24.8.1.2684-lts.md
Normal file
@ -0,0 +1,525 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.8.1.2684-lts (161c62fd295) FIXME as compared to v24.8.1.1-new (3f8b27d7acc)

#### Backward Incompatible Change
* `clickhouse-client` and `clickhouse-local` now default to multi-query mode (instead of single-query mode). As an example, `clickhouse-client -q "SELECT 1; SELECT 2"` now works, whereas users previously had to add `--multiquery` (or `-n`). The `--multiquery`/`-n` switch became obsolete. INSERT queries in multi-query statements are treated specially based on their FORMAT clause: if the FORMAT is `VALUES` (the most common case), the end of the INSERT statement is represented by a trailing semicolon `;` at the end of the query; for all other FORMATs (e.g. `CSV` or `JSONEachRow`), the end of the INSERT statement is represented by two newlines `\n\n` at the end of the query. [#63898](https://github.com/ClickHouse/ClickHouse/pull/63898) ([FFish](https://github.com/wxybear)). (See the sketch after this section.)
* In previous versions, it was possible to use an alternative syntax for `LowCardinality` data types by appending `WithDictionary` to the name of the data type. It was an initial working implementation, and it was never documented or exposed to the public. Now, it is deprecated. If you have used this syntax, you have to ALTER your tables and rename the data types to `LowCardinality`. [#66842](https://github.com/ClickHouse/ClickHouse/pull/66842) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix logical errors with storage `Buffer` used with distributed destination table. It's a backward incompatible change: queries using `Buffer` with a distributed destination table may stop working if the table appears more than once in the query (e.g., in a self-join). [#67015](https://github.com/ClickHouse/ClickHouse/pull/67015) ([vdimir](https://github.com/vdimir)).
* In previous versions, calling functions for random distributions based on the Gamma function (such as Chi-Squared, Student, Fisher) with negative arguments close to zero led to a long computation or an infinite loop. In the new version, calling these functions with zero or negative arguments will produce an exception. This closes [#67297](https://github.com/ClickHouse/ClickHouse/issues/67297). [#67326](https://github.com/ClickHouse/ClickHouse/pull/67326) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The system table `text_log` is enabled by default. This is fully compatible with previous versions, but you may notice subtly increased disk usage on the local disk (this system table takes a tiny amount of disk space). [#67428](https://github.com/ClickHouse/ClickHouse/pull/67428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, `arrayWithConstant` could be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix REPLACE modifier formatting (forbid omitting brackets). [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement Dynamic type. Now when the limit of dynamic data types is reached new types are not casted to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into Dynamic column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).
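To illustrate the multi-query default described above, here is a minimal sketch of a multi-statement script (the table name is hypothetical). With `FORMAT Values` the trailing semicolon terminates the INSERT, so the next statement can follow immediately:

```sql
-- Can be passed as one string, e.g. clickhouse-client -q "<script>"; --multiquery is no longer needed.
CREATE TABLE t_demo (x UInt64) ENGINE = Memory;
INSERT INTO t_demo FORMAT Values (1), (2), (3);
SELECT count() FROM t_demo;
DROP TABLE t_demo;
```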

#### New Feature
* Add new experimental Kafka storage engine to store offsets in Keeper instead of relying on committing them to Kafka. [#57625](https://github.com/ClickHouse/ClickHouse/pull/57625) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Add a new TimeSeries table engine. It can be created either with default settings (`CREATE TABLE tbl ENGINE=TimeSeries`) or with explicitly specified engines for its internal tables. [#64183](https://github.com/ClickHouse/ClickHouse/pull/64183) ([Vitaly Baranov](https://github.com/vitlibar)).
* Support more join strictnesses (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequality conditions that involve columns from both the left and right tables, e.g. `t1.y < t2.y` (see setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)). (See the sketch after this list.)
* Add `_etag` virtual column for S3 table engine. Fixes [#65312](https://github.com/ClickHouse/ClickHouse/issues/65312). [#65386](https://github.com/ClickHouse/ClickHouse/pull/65386) ([skyoct](https://github.com/skyoct)).
* Introduce Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. A follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add function `printf` for Spark compatibility. [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)). (See the sketch after this list.)
* Backported in [#68450](https://github.com/ClickHouse/ClickHouse/issues/68450): Implement new JSON data type. [#66444](https://github.com/ClickHouse/ClickHouse/pull/66444) ([Kruglov Pavel](https://github.com/Avogar)).
* Add a new server setting `disable_insertion_and_mutation`. When set to true, the node will deny all insertions and mutations (ALTER TABLE DELETE/UPDATE/DROP PARTITION), including asynchronous inserts. [#66519](https://github.com/ClickHouse/ClickHouse/pull/66519) ([Xu Jia](https://github.com/XuJia0210)).
* Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table functions with the `Null` engine, which can be useful for testing. They work for RESTORE and explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)).
* Added support for reading MULTILINESTRING geometry in WKT format using function readWKTLineString. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)).
* Add a new table function `fuzzQuery`. This function allows the modification of a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1') LIMIT 5;`. [#67655](https://github.com/ClickHouse/ClickHouse/pull/67655) ([pufit](https://github.com/pufit)).
* Support query `DROP DETACHED PARTITION ALL` to drop all detached partitions. [#67885](https://github.com/ClickHouse/ClickHouse/pull/67885) ([Duc Canh Le](https://github.com/canhld94)).
* Added a tagging (namespace) mechanism for the query cache. The same queries with different tags are considered different by the query cache. Example: `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'abc'` and `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'def'` now create different query cache entries. [#68235](https://github.com/ClickHouse/ClickHouse/pull/68235) ([sakulali](https://github.com/sakulali)).
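A minimal sketch of the extended join strictness support mentioned above, assuming hypothetical tables `t1(key, y)` and `t2(key, y)`:

```sql
SET allow_experimental_join_condition = 1;

-- LEFT SEMI JOIN with an inequality condition that references columns from both sides.
SELECT t1.key, t1.y
FROM t1
LEFT SEMI JOIN t2 ON t1.key = t2.key AND t1.y < t2.y;
```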
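A small sketch of the new `printf` function noted above; the format string follows the usual printf-style specifiers, though exact specifier support may vary:

```sql
SELECT printf('%s has %d rows (%.1f GiB)', 'events', 42, 1.5) AS line;
```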

#### Performance Improvement
* Use adaptive read task size calculation method (adaptive meaning it depends on read column sizes) for parallel replicas. [#60377](https://github.com/ClickHouse/ClickHouse/pull/60377) ([Nikita Taranov](https://github.com/nickitat)).
* Store the `plain_rewritable` disk directory metadata in `__meta` layout, separately from the merge tree data in the object storage. Move the `plain_rewritable` disk to a flat directory structure. [#65751](https://github.com/ClickHouse/ClickHouse/pull/65751) ([Julia Kartseva](https://github.com/jkartseva)).
* Enable `compile_expressions` (JIT compiler for fragments of ordinary expressions) by default. This closes [#51264](https://github.com/ClickHouse/ClickHouse/issues/51264) and [#56386](https://github.com/ClickHouse/ClickHouse/issues/56386). [#66486](https://github.com/ClickHouse/ClickHouse/pull/66486) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve columns squashing for String/Array/Map/Variant/Dynamic types by reserving required memory in advance for all subcolumns. [#67043](https://github.com/ClickHouse/ClickHouse/pull/67043) ([Kruglov Pavel](https://github.com/Avogar)).
* Speed up system flush logs, flush logs on shutdown. [#67472](https://github.com/ClickHouse/ClickHouse/pull/67472) ([Sema Checherinda](https://github.com/CheSema)).
* Backported in [#68496](https://github.com/ClickHouse/ClickHouse/issues/68496): Improved overall performance of merges by reducing the overhead of scheduling steps of merges. [#68016](https://github.com/ClickHouse/ClickHouse/pull/68016) ([Anton Popov](https://github.com/CurtizJ)).
* Setting `optimize_functions_to_subcolumns` is enabled by default. [#68053](https://github.com/ClickHouse/ClickHouse/pull/68053) ([Anton Popov](https://github.com/CurtizJ)).
#### Improvement
|
||||||
|
* ClickHouse server now supports new setting `max_keep_alive_requests`. For keep-alive HTTP connections to the server it works in tandem with `keep_alive_timeout` - if idle timeout not expired but there already more than `max_keep_alive_requests` requests done through the given connection - it will be closed by the server. [#61793](https://github.com/ClickHouse/ClickHouse/pull/61793) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* As in the new version, SOURCES are checked based on Table Engine logic, even grant table engine is disabled by default, if a source is not granted, a prompt of table engine would popup instead, which is misleading. [#65419](https://github.com/ClickHouse/ClickHouse/pull/65419) ([jsc0218](https://github.com/jsc0218)).
|
||||||
|
* Added statistics type `count_min` (count-min sketches) which provide selectivity estimations for equality predicates like `col = 'val'`. Supported data types are string, date, datatime and numeric types. [#65521](https://github.com/ClickHouse/ClickHouse/pull/65521) ([JackyWoo](https://github.com/JackyWoo)).
|
||||||
|
* Do not pass logs for keeper explicitly in the image to allow overriding. [#65564](https://github.com/ClickHouse/ClickHouse/pull/65564) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Use `Atomic` database by default in `clickhouse-local`. Address items 1 and 5 from [#50647](https://github.com/ClickHouse/ClickHouse/issues/50647). Closes [#44817](https://github.com/ClickHouse/ClickHouse/issues/44817). [#65860](https://github.com/ClickHouse/ClickHouse/pull/65860) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add the `rows_before_aggregation_at_least` statistic to the query response when `rows_before_aggregation` is enabled. This statistic represents the number of rows read before aggregation. In the context of a distributed query, when using the `group by` or `max` aggregation function without a `limit`, `rows_before_aggregation_at_least` can reflect the number of rows hit by the query. [#66084](https://github.com/ClickHouse/ClickHouse/pull/66084) ([morning-color](https://github.com/morning-color)).
|
||||||
|
* Introduced `use_same_password_for_base_backup` settings for `BACKUP` and `RESTORE` queries, allowing to create and restore incremental backups to/from password protected archives. [#66214](https://github.com/ClickHouse/ClickHouse/pull/66214) ([Samuele](https://github.com/sguerrini97)).
|
||||||
|
* Ignore async_load_databases for ATTACH query (previously it was possible for ATTACH to return before the tables had been attached). [#66240](https://github.com/ClickHouse/ClickHouse/pull/66240) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* [Replicated]MergeTreeSink has to properly cancel its delayed_chunk on `onCancel()` method. [#66279](https://github.com/ClickHouse/ClickHouse/pull/66279) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Added logs and metrics for rejected connections (where there are not enough resources). [#66410](https://github.com/ClickHouse/ClickHouse/pull/66410) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Setting `allow_experimental_analyzer` is renamed to `enable_analyzer`. The old name is preserved in a form of an alias. [#66438](https://github.com/ClickHouse/ClickHouse/pull/66438) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Support true UUID type for MongoDB engine. [#66671](https://github.com/ClickHouse/ClickHouse/pull/66671) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Added a new `MergeTree` setting `deduplicate_merge_projection_mode` to control the projections during merges (for specific engines) and `OPTIMIZE DEDUPLICATE` query. Supported options: `throw` (throw an exception in case the projection is not fully supported for *MergeTree engine), `drop` (remove projection during merge if it can't be merged itself consistently) and `rebuild` (rebuild projection from scratch, which is a heavy operation). [#66672](https://github.com/ClickHouse/ClickHouse/pull/66672) ([jsc0218](https://github.com/jsc0218)).
|
||||||
|
* Add replication lag and recovery time metrics. [#66703](https://github.com/ClickHouse/ClickHouse/pull/66703) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
|
||||||
|
* Add S3DiskNoKeyErrors metric. [#66704](https://github.com/ClickHouse/ClickHouse/pull/66704) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
|
||||||
|
* Ensure COMMENT clause works for all table engines. [#66832](https://github.com/ClickHouse/ClickHouse/pull/66832) ([Joe Lynch](https://github.com/joelynch)).
|
||||||
|
* Update the usage of error code `BAD_ARGUMENTS` and `ILLEGAL_TYPE_OF_ARGUMENT` by more accurate error codes when appropriate. [#66851](https://github.com/ClickHouse/ClickHouse/pull/66851) ([Yohann Jardin](https://github.com/yohannj)).
|
||||||
|
* Function `mapFromArrays` now accepts `Map(K, V)` as first argument, for example: `SELECT mapFromArrays(map('a', 4, 'b', 4), ['aa', 'bb'])` now works and returns `{('a',4):'aa',('b',4):'bb'}`. Also, if the 1st argument is an Array, it can now also be of type `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as long as the actual array values are not `NULL`. [#67103](https://github.com/ClickHouse/ClickHouse/pull/67103) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Read configuration for clickhouse-local from ~/.clickhouse-local. [#67135](https://github.com/ClickHouse/ClickHouse/pull/67135) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Rename setting `input_format_orc_read_use_writer_time_zone` to `input_format_orc_reader_timezone` and allow the user to set the reader timezone. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)).
|
||||||
|
* Decrease level of 'Socket is not connected' error when HTTP connection immediately reset by peer after connecting, close [#34218](https://github.com/ClickHouse/ClickHouse/issues/34218). [#67177](https://github.com/ClickHouse/ClickHouse/pull/67177) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Speed up tables removal for `DROP DATABASE` query, increased the default value for `database_catalog_drop_table_concurrency` to 16. [#67228](https://github.com/ClickHouse/ClickHouse/pull/67228) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Add ability to load dashboards for system.dashboards from config (once set, they overrides the default dashboards preset). [#67232](https://github.com/ClickHouse/ClickHouse/pull/67232) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* The window functions in SQL are traditionally in snake case. ClickHouse uses camelCase, so new aliases `denseRank()` and `percentRank()` have been created. These new functions can be called the exact same as the original `dense_rank()` and `percent_rank()` functions. Both snake case and camelCase syntaxes remain usable. A new test for each of the functions has been added as well. This closes [#67042](https://github.com/ClickHouse/ClickHouse/issues/67042) . [#67334](https://github.com/ClickHouse/ClickHouse/pull/67334) ([Peter Nguyen](https://github.com/petern48)).
|
||||||
|
* Autodetect configuration file format if is not .xml, .yml or .yaml. If the file begins with < it might be XML, otherwise it might be YAML. Non regular file just parse as XML such as PIPE: /dev/fd/X. [#67391](https://github.com/ClickHouse/ClickHouse/pull/67391) ([sakulali](https://github.com/sakulali)).
* Functions `formatDateTime` and `formatDateTimeInJodaSyntax` now treat their format parameter as optional. If it is not specified, format strings `%Y-%m-%d %H:%i:%s` and `yyyy-MM-dd HH:mm:ss` are assumed. Example: `SELECT parseDateTime('2021-01-04 23:12:34')` now returns DateTime value `2021-01-04 23:12:34` (previously, this threw an exception). [#67399](https://github.com/ClickHouse/ClickHouse/pull/67399) ([Robert Schulze](https://github.com/rschu1ze)).
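
  A short illustration of the defaulted format strings described above:

  ```sql
  SELECT
      parseDateTime('2021-01-04 23:12:34')  AS parsed,          -- format defaults to '%Y-%m-%d %H:%i:%s'
      formatDateTime(parsed)                AS formatted,       -- same default format
      formatDateTimeInJodaSyntax(parsed)    AS formatted_joda;  -- defaults to 'yyyy-MM-dd HH:mm:ss'
  ```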
* Automatically retry Keeper requests in KeeperMap if they happen because of timeout or connection loss. [#67448](https://github.com/ClickHouse/ClickHouse/pull/67448) ([Antonio Andelic](https://github.com/antonio2368)).
* Rework usage of custom table's disks. [#67684](https://github.com/ClickHouse/ClickHouse/pull/67684) ([Sema Checherinda](https://github.com/CheSema)).
* Various improvements in the advanced dashboard. This closes [#67697](https://github.com/ClickHouse/ClickHouse/issues/67697). This closes [#63407](https://github.com/ClickHouse/ClickHouse/issues/63407). This closes [#51129](https://github.com/ClickHouse/ClickHouse/issues/51129). This closes [#61204](https://github.com/ClickHouse/ClickHouse/issues/61204). [#67701](https://github.com/ClickHouse/ClickHouse/pull/67701) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Avoid allocating too much capacity for array columns while writing ORC, plus some minor refactoring to make the code cleaner. Performance improves by 15% for array columns. [#67879](https://github.com/ClickHouse/ClickHouse/pull/67879) ([李扬](https://github.com/taiyang-li)).
* Support the `OPTIMIZE` query on the Join table engine to reduce the memory footprint of Join tables. [#67883](https://github.com/ClickHouse/ClickHouse/pull/67883) ([Duc Canh Le](https://github.com/canhld94)).
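
  A minimal sketch (the table definition and data are illustrative) of compacting a Join table after inserts:

  ```sql
  CREATE TABLE lookup (k UInt64, v String)
  ENGINE = Join(ANY, LEFT, k);

  INSERT INTO lookup VALUES (1, 'a'), (2, 'b');

  -- Compacts the Join table's in-memory blocks to reduce its footprint.
  OPTIMIZE TABLE lookup;
  ```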
* Add replication lag and recovery time metrics. [#67913](https://github.com/ClickHouse/ClickHouse/pull/67913) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Add '-no-pie' to aarch64 Linux builds to allow proper introspection and symbolizing of stacktraces after a ClickHouse restart. [#67916](https://github.com/ClickHouse/ClickHouse/pull/67916) ([filimonov](https://github.com/filimonov)).
* Backported in [#68481](https://github.com/ClickHouse/ClickHouse/issues/68481): Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)).
* Fix settings/current_database in system.processes for async BACKUP/RESTORE. [#68163](https://github.com/ClickHouse/ClickHouse/pull/68163) ([Azat Khuzhin](https://github.com/azat)).
* Remove unnecessary logs for MergeTree tables that do not support replication. [#68238](https://github.com/ClickHouse/ClickHouse/pull/68238) ([Daniil Ivanik](https://github.com/divanik)).
* Backported in [#68430](https://github.com/ClickHouse/ClickHouse/issues/68430): Improve schema inference of date times. Now `DateTime64` is used only when the date time has a fractional part, otherwise regular `DateTime` is used. Inference of `Date`/`DateTime` is stricter now, especially when `date_time_input_format='best_effort'`, to avoid inferring date times from strings in corner cases. [#68382](https://github.com/ClickHouse/ClickHouse/pull/68382) ([Kruglov Pavel](https://github.com/Avogar)).
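
  For illustration, the difference can be observed with the `format` table function (the values are made up):

  ```sql
  -- d1 has no fractional seconds and should now be inferred as a DateTime type,
  -- d2 has a fractional part and should be inferred as a DateTime64 type
  -- (possibly wrapped in Nullable, depending on the inference settings).
  DESCRIBE format(JSONEachRow, '{"d1": "2024-01-01 00:00:00", "d2": "2024-01-01 00:00:00.123"}');
  ```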
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Fixed reading of subcolumns after `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)).
* Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Fix formatting of query with aliased JOIN ON expression, e.g. `... JOIN t2 ON (x = y) AS e ORDER BY x` should be formatted as `... JOIN t2 ON ((x = y) AS e) ORDER BY x`. [#66312](https://github.com/ClickHouse/ClickHouse/pull/66312) ([vdimir](https://github.com/vdimir)).
* Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible runtime error while converting Array field with nulls to Array(Variant). [#66727](https://github.com/ClickHouse/ClickHouse/pull/66727) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Fix creating KeeperMap table after an incomplete drop. [#66865](https://github.com/ClickHouse/ClickHouse/pull/66865) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix broken part error while restoring to a `s3_plain_rewritable` disk. [#66881](https://github.com/ClickHouse/ClickHouse/pull/66881) ([Vitaly Baranov](https://github.com/vitlibar)).
* In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Fix invalid format detection in schema inference that could lead to logical error Format {} doesn't support schema inference. [#66899](https://github.com/ClickHouse/ClickHouse/pull/66899) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible deadlock on query cancel with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)).
* Forbid `CREATE ... AS SELECT` even when `database_replicated_allow_heavy_create` is set. It was unconditionally forbidden in 23.12 and accidentally allowed under the setting in unreleased 24.7. [#66980](https://github.com/ClickHouse/ClickHouse/pull/66980) ([vdimir](https://github.com/vdimir)).
* Reading from the `numbers` could wrongly throw an exception when the `max_rows_to_read` limit was set. This closes [#66992](https://github.com/ClickHouse/ClickHouse/issues/66992). [#66996](https://github.com/ClickHouse/ClickHouse/pull/66996) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add proper type conversion to lagInFrame and leadInFrame window functions - fixes msan test. [#67091](https://github.com/ClickHouse/ClickHouse/pull/67091) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Use a separate client context in `clickhouse-local`. [#67133](https://github.com/ClickHouse/ClickHouse/pull/67133) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over a `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Correct behavior of `ORDER BY all` with disabled `enable_order_by_all` and parallel replicas (distributed queries as well). [#67153](https://github.com/ClickHouse/ClickHouse/pull/67153) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix wrong usage of input_format_max_bytes_to_read_for_schema_inference in schema cache. [#67157](https://github.com/ClickHouse/ClickHouse/pull/67157) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a memory leak in `count distinct` when an exception is thrown during `GROUP BY` on a single nullable key. [#67171](https://github.com/ClickHouse/ClickHouse/pull/67171) ([Jet He](https://github.com/compasses)).
* This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Fix error `Conversion from AggregateFunction(name, Type) to AggregateFunction(name, Nullable(Type)) is not supported`. The bug was caused by the `optimize_rewrite_aggregate_function_with_if` optimization. Fixes [#67112](https://github.com/ClickHouse/ClickHouse/issues/67112). [#67229](https://github.com/ClickHouse/ClickHouse/pull/67229) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix hung query when using empty tuple as lhs of function IN. [#67295](https://github.com/ClickHouse/ClickHouse/pull/67295) ([Duc Canh Le](https://github.com/canhld94)).
* Fix crash of `uniq` and `uniqTheta` with `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)).
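
  The formerly crashing kind of query, per the description above, is expected to work now (illustrative):

  ```sql
  SELECT uniq(tuple()), uniqTheta(tuple()) FROM numbers(3);
  ```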
* It was possible to create a very deep nested JSON data that triggered stack overflow while skipping unknown fields. This closes [#67292](https://github.com/ClickHouse/ClickHouse/issues/67292). [#67324](https://github.com/ClickHouse/ClickHouse/pull/67324) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix attaching ReplicatedMergeTree table after exception during startup. [#67360](https://github.com/ClickHouse/ClickHouse/pull/67360) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix segfault caused by incorrectly detaching from thread group in `Aggregator`. [#67385](https://github.com/ClickHouse/ClickHouse/pull/67385) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix one more case when a non-deterministic function is specified in PK. [#67395](https://github.com/ClickHouse/ClickHouse/pull/67395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed `bloom_filter` index breaking queries with mildly weird conditions like `(k=2)=(k=2)` or `has([1,2,3], k)`. [#67423](https://github.com/ClickHouse/ClickHouse/pull/67423) ([Michael Kolupaev](https://github.com/al13n321)).
* Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix wait for tasks in ~WriteBufferFromS3 in case WriteBuffer was cancelled. [#67459](https://github.com/ClickHouse/ClickHouse/pull/67459) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Protect temporary part directories from removing during RESTORE. [#67491](https://github.com/ClickHouse/ClickHouse/pull/67491) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Fix `Logical error: Expected the argument №N of type T to have X rows, but it has 0`. The error could happen in a remote query with constant expression in `GROUP BY` (with a new analyzer). [#67536](https://github.com/ClickHouse/ClickHouse/pull/67536) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix join on tuple with NULLs: Some queries with the new analyzer and `NULL` inside the tuple in the `JOIN ON` section returned incorrect results. [#67538](https://github.com/ClickHouse/ClickHouse/pull/67538) ([vdimir](https://github.com/vdimir)).
* Fix redundant reschedule of FileCache::freeSpaceRatioKeepingThreadFunc() in case of full non-evictable cache. [#67540](https://github.com/ClickHouse/ClickHouse/pull/67540) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix for function `toStartOfWeek` which returned the wrong result with a small `DateTime64` value. [#67558](https://github.com/ClickHouse/ClickHouse/pull/67558) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix creation of view with recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix `Logical error: 'file_offset_of_buffer_end <= read_until_position'` in filesystem cache. Closes [#57508](https://github.com/ClickHouse/ClickHouse/issues/57508). [#67623](https://github.com/ClickHouse/ClickHouse/pull/67623) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixes [#62282](https://github.com/ClickHouse/ClickHouse/issues/62282). Removed the call to `convertFieldToString()` and added datatype specific serialization code. Parameterized view substitution was broken for multiple datatypes when parameter value was a function or expression returning datatype instance. [#67654](https://github.com/ClickHouse/ClickHouse/pull/67654) ([Shankar](https://github.com/shiyer7474)).
* Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is now taken into account, and window functions without a window frame definition in SQL are now placed into the proper `WindowTransformer`s. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
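
  A small sketch of calling `percent_rank` without an explicit frame, which now gets the default frame described above (the data is illustrative):

  ```sql
  SELECT
      x,
      percent_rank() OVER (ORDER BY x) AS pr
  FROM values('x UInt32', (1), (2), (3), (4));
  ```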
* Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible logical error "Unexpected return type from if" with the experimental Variant type and the enabled setting `use_variant_as_common_type` in the function `if` with Tuples and Maps. [#67687](https://github.com/ClickHouse/ClickHouse/pull/67687) ([Kruglov Pavel](https://github.com/Avogar)).
* Due to a bug in the Linux kernel, a query can hang in `TimerDescriptor::drain`. This closes [#37686](https://github.com/ClickHouse/ClickHouse/issues/37686). [#67702](https://github.com/ClickHouse/ClickHouse/pull/67702) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix completion of `RESTORE ON CLUSTER` command. [#67720](https://github.com/ClickHouse/ClickHouse/pull/67720) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix dictionary hang in case of CANNOT_SCHEDULE_TASK while loading. [#67751](https://github.com/ClickHouse/ClickHouse/pull/67751) ([Azat Khuzhin](https://github.com/azat)).
* Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
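
  Both spellings refer to the same function, for example:

  ```sql
  -- Both calls are expected to return {"a":1,"b":3,"c":4}.
  SELECT
      JSONMergePatch('{"a": 1, "b": 2}', '{"b": 3, "c": 4}') AS new_name,
      jsonMergePatch('{"a": 1, "b": 2}', '{"b": 3, "c": 4}') AS old_name;
  ```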
* Queries like `SELECT count() FROM t WHERE cast(c = 1 or c = 9999 AS Bool) SETTINGS use_skip_indexes=1` with bloom filter indexes on `c` now work correctly. [#67781](https://github.com/ClickHouse/ClickHouse/pull/67781) ([jsc0218](https://github.com/jsc0218)).
* Fix wrong aggregation result in some queries with aggregation without keys and filter, close [#67419](https://github.com/ClickHouse/ClickHouse/issues/67419). [#67804](https://github.com/ClickHouse/ClickHouse/pull/67804) ([vdimir](https://github.com/vdimir)).
* Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
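
  A sketch of the kind of statement that is now validated (the table name and column type are illustrative; the exact outcome depends on the corresponding `allow_experimental_*`/`allow_suspicious_*` settings):

  ```sql
  SET allow_experimental_variant_type = 0;
  -- With the experimental type disabled, the ALTER is now rejected up front
  -- instead of adding a column of a disallowed type.
  ALTER TABLE t ADD COLUMN v Variant(UInt64, String);
  ```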
* Fix DateTime64 parsing after constant folding in distributed queries, close [#66773](https://github.com/ClickHouse/ClickHouse/issues/66773). [#67920](https://github.com/ClickHouse/ClickHouse/pull/67920) ([vdimir](https://github.com/vdimir)).
* Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Now ClickHouse doesn't consider part as broken if projection doesn't exist on disk but exists in `checksums.txt`. [#68003](https://github.com/ClickHouse/ClickHouse/pull/68003) ([alesapin](https://github.com/alesapin)).
* Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Attempt to fix `Block structure mismatch in AggregatingStep stream: different types` for aggregate projection optimization. [#68107](https://github.com/ClickHouse/ClickHouse/pull/68107) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68343](https://github.com/ClickHouse/ClickHouse/issues/68343): Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68400](https://github.com/ClickHouse/ClickHouse/issues/68400): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
#### Build/Testing/Packaging Improvement
* Improved `test_storage_s3` tests: increased `s3_max_single_read_retries` for read from "unstable" s3 source and allowed all tests to run multiple times in a row. [#66896](https://github.com/ClickHouse/ClickHouse/pull/66896) ([Ilya Yatsishin](https://github.com/qoega)).
* The integration tests flaky check now uses the `pytest-repeat` library to run each test case multiple times in the same environment, to find more issues in tests and make them more reliable, instead of running pytest several times. It is important to clean up tables and other entities at the end of a test case for it to pass. Repeat works much faster than several pytest runs, as it starts the necessary containers only once. [#66986](https://github.com/ClickHouse/ClickHouse/pull/66986) ([Ilya Yatsishin](https://github.com/qoega)).
* Allow using CLion with ClickHouse. In previous versions, CLion froze for a minute on every keypress. This closes [#66994](https://github.com/ClickHouse/ClickHouse/issues/66994). [#66995](https://github.com/ClickHouse/ClickHouse/pull/66995) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* `getauxval`: avoid a crash when sanitizers re-exec the binary due to high ASLR entropy. [#67081](https://github.com/ClickHouse/ClickHouse/pull/67081) ([Raúl Marín](https://github.com/Algunenano)).
* Some parts of client code are extracted to a single file and highest possible level optimization is applied to them even for debug builds. This closes: [#65745](https://github.com/ClickHouse/ClickHouse/issues/65745). [#67215](https://github.com/ClickHouse/ClickHouse/pull/67215) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### NO CL CATEGORY
* Backported in [#68416](https://github.com/ClickHouse/ClickHouse/issues/68416):. [#68386](https://github.com/ClickHouse/ClickHouse/pull/68386) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Fix for 992 and friends"'. [#66993](https://github.com/ClickHouse/ClickHouse/pull/66993) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Revert "Fix for 992 and friends""'. [#67029](https://github.com/ClickHouse/ClickHouse/pull/67029) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "FuzzQuery table function"'. [#67040](https://github.com/ClickHouse/ClickHouse/pull/67040) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Enable `compile_expressions` by default."'. [#67299](https://github.com/ClickHouse/ClickHouse/pull/67299) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Slightly better calculation of primary index"'. [#67392](https://github.com/ClickHouse/ClickHouse/pull/67392) ([alesapin](https://github.com/alesapin)).
* NO CL ENTRY: 'Revert "Add settings to replace external engines to Null during create"'. [#67507](https://github.com/ClickHouse/ClickHouse/pull/67507) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "Revert "Add settings to replace external engines to Null during create""'. [#67511](https://github.com/ClickHouse/ClickHouse/pull/67511) ([Ilya Yatsishin](https://github.com/qoega)).
* NO CL ENTRY: 'Revert "Add replication lag and recovery time metrics"'. [#67731](https://github.com/ClickHouse/ClickHouse/pull/67731) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Revert "Slightly better calculation of primary index""'. [#67846](https://github.com/ClickHouse/ClickHouse/pull/67846) ([Anton Popov](https://github.com/CurtizJ)).
* NO CL ENTRY: 'Revert "CI: Strict job timeout 1.5h for tests, 2h for builds"'. [#67986](https://github.com/ClickHouse/ClickHouse/pull/67986) ([Max K.](https://github.com/maxknv)).
* NO CL ENTRY: 'Revert "Bump rocksdb from v8.10 to v9.4 + enable jemalloc and liburing"'. [#68014](https://github.com/ClickHouse/ClickHouse/pull/68014) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* NO CL ENTRY: 'Revert "Use `Atomic` database by default in `clickhouse-local`"'. [#68023](https://github.com/ClickHouse/ClickHouse/pull/68023) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Refactor tests for (experimental) statistics"'. [#68156](https://github.com/ClickHouse/ClickHouse/pull/68156) ([Alexander Tokmakov](https://github.com/tavplubix)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* CI: enable libfuzzer (fixing build and docker). [#61908](https://github.com/ClickHouse/ClickHouse/pull/61908) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Initial implementation of vector similarity index. [#63675](https://github.com/ClickHouse/ClickHouse/pull/63675) ([Robert Schulze](https://github.com/rschu1ze)).
* Update zlib-ng from 2.0.2 to 2.1.7. [#64489](https://github.com/ClickHouse/ClickHouse/pull/64489) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix 02444_async_broken_outdated_part_loading flakiness. [#64956](https://github.com/ClickHouse/ClickHouse/pull/64956) ([Azat Khuzhin](https://github.com/azat)).
* attach_gdb.lib: print more information before all stacks. [#65253](https://github.com/ClickHouse/ClickHouse/pull/65253) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix some perf tests. [#65320](https://github.com/ClickHouse/ClickHouse/pull/65320) ([Nikita Taranov](https://github.com/nickitat)).
* Remove ActionsDAGPtr whenever it is possible. [#65414](https://github.com/ClickHouse/ClickHouse/pull/65414) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Scheduler queue throughput introspection, Fix CPU indication in client. [#65654](https://github.com/ClickHouse/ClickHouse/pull/65654) ([Sergei Trifonov](https://github.com/serxa)).
* Increase timeout in 02122_join_group_by_timeout for tsan build. [#65976](https://github.com/ClickHouse/ClickHouse/pull/65976) ([vdimir](https://github.com/vdimir)).
* Remove default values for certificateFile/privateKeyFile/dhParamsFile in keeper config (to avoid annoying errors in logs). [#65978](https://github.com/ClickHouse/ClickHouse/pull/65978) ([Azat Khuzhin](https://github.com/azat)).
* Update version_date.tsv and changelogs after v24.3.5.46-lts. [#66054](https://github.com/ClickHouse/ClickHouse/pull/66054) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix flaky `02814_currentDatabase_for_table_functions`. [#66111](https://github.com/ClickHouse/ClickHouse/pull/66111) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix possible data-race StorageKafka with statistics_interval_ms>0. [#66311](https://github.com/ClickHouse/ClickHouse/pull/66311) ([Azat Khuzhin](https://github.com/azat)).
* Avoid unneeded calculation in SeriesPeriodDetect. [#66320](https://github.com/ClickHouse/ClickHouse/pull/66320) ([Ruihang Xia](https://github.com/waynexia)).
* It aims to complete [#58630](https://github.com/ClickHouse/ClickHouse/issues/58630). This is made possible by [#60463](https://github.com/ClickHouse/ClickHouse/issues/60463), [#61459](https://github.com/ClickHouse/ClickHouse/issues/61459) and [#60082](https://github.com/ClickHouse/ClickHouse/issues/60082). [#66443](https://github.com/ClickHouse/ClickHouse/pull/66443) ([Amos Bird](https://github.com/amosbird)).
* Allow running a query instantly in the play UI. [#66457](https://github.com/ClickHouse/ClickHouse/pull/66457) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Bump ICU from v70 to v75. [#66474](https://github.com/ClickHouse/ClickHouse/pull/66474) ([Robert Schulze](https://github.com/rschu1ze)).
* Bump RocksDB from v6.29.5 to v7.10.2. [#66475](https://github.com/ClickHouse/ClickHouse/pull/66475) ([Robert Schulze](https://github.com/rschu1ze)).
* Bump RocksDB from v7.10.2 to v8.9.1. [#66479](https://github.com/ClickHouse/ClickHouse/pull/66479) ([Robert Schulze](https://github.com/rschu1ze)).
* I believe the error code for this function should not be "NOT_ALLOWED" since it simply is an invalid query and "BAD_QUERY_PARAMETER" is a more reasonable error code for this. [#66491](https://github.com/ClickHouse/ClickHouse/pull/66491) ([Ali](https://github.com/xogoodnow)).
* Update gdb to 15.1 (by compiling from sources). [#66494](https://github.com/ClickHouse/ClickHouse/pull/66494) ([Azat Khuzhin](https://github.com/azat)).
* Ensure that llvm-symbolizer is used for symbolizing sanitizer reports. [#66495](https://github.com/ClickHouse/ClickHouse/pull/66495) ([Azat Khuzhin](https://github.com/azat)).
* Remove unused local variables. [#66503](https://github.com/ClickHouse/ClickHouse/pull/66503) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* This will solve a lot of problems with inconsistent formatting. And it opens the path for [#65753](https://github.com/ClickHouse/ClickHouse/issues/65753). This closes [#66807](https://github.com/ClickHouse/ClickHouse/issues/66807). This closes [#61611](https://github.com/ClickHouse/ClickHouse/issues/61611). This closes [#61711](https://github.com/ClickHouse/ClickHouse/issues/61711). This closes [#67445](https://github.com/ClickHouse/ClickHouse/issues/67445). [#66506](https://github.com/ClickHouse/ClickHouse/pull/66506) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Rename Context::getSettings() to Context::getSettingsCopy(). [#66528](https://github.com/ClickHouse/ClickHouse/pull/66528) ([Raúl Marín](https://github.com/Algunenano)).
* Uninteresting change: introducing `ClientApplicationBase`. [#66549](https://github.com/ClickHouse/ClickHouse/pull/66549) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Respond to parallel replicas protocol requests with priority on initiator. [#66618](https://github.com/ClickHouse/ClickHouse/pull/66618) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix bad code: it was catching exceptions. [#66628](https://github.com/ClickHouse/ClickHouse/pull/66628) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Dump all memory stats in CgroupsMemoryUsageObserver on hitting the limit. [#66732](https://github.com/ClickHouse/ClickHouse/pull/66732) ([Nikita Taranov](https://github.com/nickitat)).
* Save writer thread id in shared mutex for debugging. [#66745](https://github.com/ClickHouse/ClickHouse/pull/66745) ([Alexander Gololobov](https://github.com/davenger)).
* Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* It looks like the test sometimes runs too many mutations and fails to process them within the timeout, so if a query waits for mutations, the test fails. [#66785](https://github.com/ClickHouse/ClickHouse/pull/66785) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Better diagnostics in functional tests. [#66790](https://github.com/ClickHouse/ClickHouse/pull/66790) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad test `01042_system_reload_dictionary_reloads_completely`. [#66811](https://github.com/ClickHouse/ClickHouse/pull/66811) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Something is strange with the test about refreshable materialized views. [#66816](https://github.com/ClickHouse/ClickHouse/pull/66816) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Randomize `trace_profile_events`. [#66821](https://github.com/ClickHouse/ClickHouse/pull/66821) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Stop ignoring SIGSEGV in GDB. [#66847](https://github.com/ClickHouse/ClickHouse/pull/66847) ([Antonio Andelic](https://github.com/antonio2368)).
* Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* When executing queries with parallel replicas that involve only a subset of nodes within a shard, the current behavior is that if all participating replicas are unavailable, the query completes without any errors but returns no results. Referencing issue [#65467](https://github.com/ClickHouse/ClickHouse/issues/65467), this pull request addresses the issue where only a portion of the nodes in a shard are participating in the execution. [#66880](https://github.com/ClickHouse/ClickHouse/pull/66880) ([zoomxi](https://github.com/zoomxi)).
* Speed up stateful tests setup. [#66886](https://github.com/ClickHouse/ClickHouse/pull/66886) ([Raúl Marín](https://github.com/Algunenano)).
* Functions [h-r]*: Iterate over input_rows_count where appropriate. [#66897](https://github.com/ClickHouse/ClickHouse/pull/66897) ([Robert Schulze](https://github.com/rschu1ze)).
* Stateless tests: Change status of failed tests in case of server crash and add no-parallel to high-load tests. [#66901](https://github.com/ClickHouse/ClickHouse/pull/66901) ([Nikita Fomichev](https://github.com/fm4v)).
* Fix performance test about the generateRandom table function, supposedly. [#66906](https://github.com/ClickHouse/ClickHouse/pull/66906) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad tests `share_big_sets`, CC @davenger. [#66908](https://github.com/ClickHouse/ClickHouse/pull/66908) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up mutations for non-replicated MergeTree a bit. [#66909](https://github.com/ClickHouse/ClickHouse/pull/66909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up mutations for non-replicated MergeTree significantly. [#66911](https://github.com/ClickHouse/ClickHouse/pull/66911) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix views over distributed tables with Analyzer. [#66912](https://github.com/ClickHouse/ClickHouse/pull/66912) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* [CI fest] Try to fix `test_broken_projections/test.py::test_broken_ignored_replicated`. [#66915](https://github.com/ClickHouse/ClickHouse/pull/66915) ([Andrey Zvonov](https://github.com/zvonand)).
* Decrease rate limit in `01923_network_receive_time_metric_insert`. [#66924](https://github.com/ClickHouse/ClickHouse/pull/66924) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* `groupArrayIntersect`: fix a serialization bug. [#66928](https://github.com/ClickHouse/ClickHouse/pull/66928) ([Raúl Marín](https://github.com/Algunenano)).
* Update version after release branch. [#66929](https://github.com/ClickHouse/ClickHouse/pull/66929) ([Raúl Marín](https://github.com/Algunenano)).
* Un-flake test_runtime_configurable_cache_size. [#66934](https://github.com/ClickHouse/ClickHouse/pull/66934) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix unit tests `ResolvePoolTest` with timeouts. [#66953](https://github.com/ClickHouse/ClickHouse/pull/66953) ([Sema Checherinda](https://github.com/CheSema)).
* Split slow test 03036_dynamic_read_subcolumns. [#66954](https://github.com/ClickHouse/ClickHouse/pull/66954) ([Nikita Taranov](https://github.com/nickitat)).
* CI: Fixes docker server build for release branches. [#66955](https://github.com/ClickHouse/ClickHouse/pull/66955) ([Max K.](https://github.com/maxknv)).
* Addressing issue [#64936](https://github.com/ClickHouse/ClickHouse/issues/64936). [#66973](https://github.com/ClickHouse/ClickHouse/pull/66973) ([alesapin](https://github.com/alesapin)).
* Add initial 24.7 changelog. [#66976](https://github.com/ClickHouse/ClickHouse/pull/66976) ([Raúl Marín](https://github.com/Algunenano)).
* Apply libunwind fix. [#66977](https://github.com/ClickHouse/ClickHouse/pull/66977) ([Michael Kolupaev](https://github.com/al13n321)).
* CI: Add logs for debugging. [#66979](https://github.com/ClickHouse/ClickHouse/pull/66979) ([Max K.](https://github.com/maxknv)).
* [CI Fest] Split dynamic tests and rewrite them from sh to sql to avoid timeouts. [#66981](https://github.com/ClickHouse/ClickHouse/pull/66981) ([Kruglov Pavel](https://github.com/Avogar)).
* Split 01508_partition_pruning_long. [#66983](https://github.com/ClickHouse/ClickHouse/pull/66983) ([Nikita Taranov](https://github.com/nickitat)).
* [CI Fest] Fix use-of-uninitialized-value in JSONExtract* numeric functions. [#66984](https://github.com/ClickHouse/ClickHouse/pull/66984) ([Kruglov Pavel](https://github.com/Avogar)).
* It should fix SQLancer checks, but for some reason we stopped invalidating the cache for docker builds, and the fix was not published to our CI for a while. [#66987](https://github.com/ClickHouse/ClickHouse/pull/66987) ([Ilya Yatsishin](https://github.com/qoega)).
* Fixes [#66941](https://github.com/ClickHouse/ClickHouse/issues/66941). [#66991](https://github.com/ClickHouse/ClickHouse/pull/66991) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Remove the support for Kerberized HDFS. [#66998](https://github.com/ClickHouse/ClickHouse/pull/66998) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* CI: Fix for workflow results parsing. [#67000](https://github.com/ClickHouse/ClickHouse/pull/67000) ([Max K.](https://github.com/maxknv)).
* Fix flaky `01454_storagememory_data_race_challenge`. [#67003](https://github.com/ClickHouse/ClickHouse/pull/67003) ([Antonio Andelic](https://github.com/antonio2368)).
* CI: Jepsen Workflow fix for skipped builds and observability. [#67004](https://github.com/ClickHouse/ClickHouse/pull/67004) ([Max K.](https://github.com/maxknv)).
* Bugfix: `AttachedTable` counting was not symmetric; also adding some test logs… [#67007](https://github.com/ClickHouse/ClickHouse/pull/67007) ([Xu Jia](https://github.com/XuJia0210)).
* CI: Automerge when required and non-required checks completed. [#67008](https://github.com/ClickHouse/ClickHouse/pull/67008) ([Max K.](https://github.com/maxknv)).
* Fix test `very_long_arrays`. [#67009](https://github.com/ClickHouse/ClickHouse/pull/67009) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Try to fix exception logging in destructors of static objects. [#67016](https://github.com/ClickHouse/ClickHouse/pull/67016) ([Antonio Andelic](https://github.com/antonio2368)).
* [Green CI] Fix test test_storage_azure_blob_storage. [#67019](https://github.com/ClickHouse/ClickHouse/pull/67019) ([Daniil Ivanik](https://github.com/divanik)).
* Integration tests: fix flaky tests `test_backup_restore_on_cluster/test_concurrency.py` & `test_manipulate_statistics/test.py`. [#67027](https://github.com/ClickHouse/ClickHouse/pull/67027) ([Nikita Fomichev](https://github.com/fm4v)).
* [Green CI] Fix test test_storage_s3_queue/test.py::test_max_set_age. [#67035](https://github.com/ClickHouse/ClickHouse/pull/67035) ([Pablo Marcos](https://github.com/pamarcos)).
* Test for alter select with parallel replicas. [#67041](https://github.com/ClickHouse/ClickHouse/pull/67041) ([Igor Nikonov](https://github.com/devcrafter)).
* Split query into multiple queries to consume less memory at once + use less data. Fixes [#67034](https://github.com/ClickHouse/ClickHouse/issues/67034). [#67044](https://github.com/ClickHouse/ClickHouse/pull/67044) ([alesapin](https://github.com/alesapin)).
* Disable setting `optimize_functions_to_subcolumns`. [#67046](https://github.com/ClickHouse/ClickHouse/pull/67046) ([Anton Popov](https://github.com/CurtizJ)).
* Increase max allocation size for sanitizers. [#67049](https://github.com/ClickHouse/ClickHouse/pull/67049) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Rewrite the 01171 test. [#67054](https://github.com/ClickHouse/ClickHouse/pull/67054) ([Sema Checherinda](https://github.com/CheSema)).
* Add `**` to `hdfs` docs, add test for `**` in `hdfs`. [#67064](https://github.com/ClickHouse/ClickHouse/pull/67064) ([Andrey Zvonov](https://github.com/zvonand)).
* Very sad failure (the log excerpt from the original description is reformatted below for readability). [#67067](https://github.com/ClickHouse/ClickHouse/pull/67067) ([alesapin](https://github.com/alesapin)).

  ```
  2024.07.24 13:28:45.517777 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> executeQuery: (from 172.16.11.1:55890) OPTIMIZE TABLE replicated_mt FINAL (stage: Complete)
  2024.07.24 13:28:45.525945 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Trace> default.replicated_mt (ReplicatedMergeTreeQueue): Waiting for 4 entries to be processed: queue-0000000004, queue-0000000002, queue-0000000001, queue-0000000000
  2024.07.24 13:29:15.528024 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e) (MergerMutator): Selected 3 parts from all_0_0_0 to all_2_2_0
  2024.07.24 13:29:15.530736 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Trace> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Created log entry /clickhouse/tables/replicated_mt/log/log-0000000004 for merge all_0_2_1
  2024.07.24 13:29:15.530873 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for node1 to process log entry
  2024.07.24 13:29:15.530919 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for node1 to pull log-0000000004 to queue
  2024.07.24 13:29:15.534286 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Looking for node corresponding to log-0000000004 in node1 queue
  2024.07.24 13:29:15.534793 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for queue-0000000005 to disappear from node1 queue
  2024.07.24 13:29:15.585533 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> TCPHandler: Processed in 30.067804125 sec.
  ```
* Fix flaky `test_seekable_formats_url` and `test_seekable_formats` S3 storage tests. [#67070](https://github.com/ClickHouse/ClickHouse/pull/67070) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* CI: Docker server build fix for new release workflow. [#67075](https://github.com/ClickHouse/ClickHouse/pull/67075) ([Max K.](https://github.com/maxknv)).
* Fix flaky test 2680. [#67078](https://github.com/ClickHouse/ClickHouse/pull/67078) ([jsc0218](https://github.com/jsc0218)).
* [CI Fest] Fix flaky 02447_drop_replica test. [#67085](https://github.com/ClickHouse/ClickHouse/pull/67085) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fixes [#67030](https://github.com/ClickHouse/ClickHouse/issues/67030). [#67086](https://github.com/ClickHouse/ClickHouse/pull/67086) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Increase timeout for curl in tests. [#67089](https://github.com/ClickHouse/ClickHouse/pull/67089) ([Anton Popov](https://github.com/CurtizJ)).
* Try calculating available memory if ClickHouse is bound to subset of NUMA nodes. [#67098](https://github.com/ClickHouse/ClickHouse/pull/67098) ([Antonio Andelic](https://github.com/antonio2368)).
* A more precise way of tracking flushing time in 01246_buffer_flush. [#67099](https://github.com/ClickHouse/ClickHouse/pull/67099) ([Azat Khuzhin](https://github.com/azat)).
* Do not fail CheckReadyForMerge on failed Tests_2 (non-required jobs). Do not skip the CiBuddy report step on failures. [#67101](https://github.com/ClickHouse/ClickHouse/pull/67101) ([Max K.](https://github.com/maxknv)).
* Tests_1 runs all required checks; Tests_2 runs all non-required checks (normal mode); Tests_2_ww runs all non-required checks (woolenwolfdog mode). [#67104](https://github.com/ClickHouse/ClickHouse/pull/67104) ([Max K.](https://github.com/maxknv)).
* Functions [s-t]*: Iterate over input_rows_count where appropriate. [#67105](https://github.com/ClickHouse/ClickHouse/pull/67105) ([Robert Schulze](https://github.com/rschu1ze)).
* Reintroduce 02805_distributed_queries_timeouts. [#67106](https://github.com/ClickHouse/ClickHouse/pull/67106) ([Azat Khuzhin](https://github.com/azat)).
* Added some tests related to [#54881](https://github.com/ClickHouse/ClickHouse/issues/54881). [#67110](https://github.com/ClickHouse/ClickHouse/pull/67110) ([max-vostrikov](https://github.com/max-vostrikov)).
* Reintroduce 03002_part_log_rmt_fetch_*_error tests without flakiness and less time. [#67113](https://github.com/ClickHouse/ClickHouse/pull/67113) ([Azat Khuzhin](https://github.com/azat)).
* Improve tag matching in backport scripts. [#67118](https://github.com/ClickHouse/ClickHouse/pull/67118) ([Raúl Marín](https://github.com/Algunenano)).
* Fixes [#67111](https://github.com/ClickHouse/ClickHouse/issues/67111). [#67121](https://github.com/ClickHouse/ClickHouse/pull/67121) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Increase lock_acquire_timeout_for_background_operations setting in dynamic merges tests. [#67126](https://github.com/ClickHouse/ClickHouse/pull/67126) ([Kruglov Pavel](https://github.com/Avogar)).
* Attempt to fix flakiness of some window view tests. [#67130](https://github.com/ClickHouse/ClickHouse/pull/67130) ([Robert Schulze](https://github.com/rschu1ze)).
* Update assert in cache. [#67138](https://github.com/ClickHouse/ClickHouse/pull/67138) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix test `00673_subquery_prepared_set_performance`. [#67141](https://github.com/ClickHouse/ClickHouse/pull/67141) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixes [#67047](https://github.com/ClickHouse/ClickHouse/issues/67047). [#67142](https://github.com/ClickHouse/ClickHouse/pull/67142) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Lower max allocation size in query fuzzer. [#67145](https://github.com/ClickHouse/ClickHouse/pull/67145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixes [#66966](https://github.com/ClickHouse/ClickHouse/issues/66966). [#67147](https://github.com/ClickHouse/ClickHouse/pull/67147) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Try fix `02481_async_insert_race_long` flakiness. [#67148](https://github.com/ClickHouse/ClickHouse/pull/67148) ([Julia Kartseva](https://github.com/jkartseva)).
* Rename (unreleased) bad setting. [#67149](https://github.com/ClickHouse/ClickHouse/pull/67149) ([Raúl Marín](https://github.com/Algunenano)).
* Uncomment accidentally commented out code in QueryProfiler. [#67152](https://github.com/ClickHouse/ClickHouse/pull/67152) ([Michael Kolupaev](https://github.com/al13n321)).
* Try to fix 2572. [#67158](https://github.com/ClickHouse/ClickHouse/pull/67158) ([jsc0218](https://github.com/jsc0218)).
* Fix benign data race in ZooKeeper. [#67164](https://github.com/ClickHouse/ClickHouse/pull/67164) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove too long unit test. [#67168](https://github.com/ClickHouse/ClickHouse/pull/67168) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `00705_drop_create_merge_tree`. [#67170](https://github.com/ClickHouse/ClickHouse/pull/67170) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix stacktrace cache. [#67173](https://github.com/ClickHouse/ClickHouse/pull/67173) ([Antonio Andelic](https://github.com/antonio2368)).
* Fixes [#67151](https://github.com/ClickHouse/ClickHouse/issues/67151). [#67174](https://github.com/ClickHouse/ClickHouse/pull/67174) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Make 02908_many_requests_to_system_replicas less stressful. [#67176](https://github.com/ClickHouse/ClickHouse/pull/67176) ([Alexander Gololobov](https://github.com/davenger)).
* Reduce max time of 00763_long_lock_buffer_alter_destination_table. [#67185](https://github.com/ClickHouse/ClickHouse/pull/67185) ([Raúl Marín](https://github.com/Algunenano)).
* Do not count AttachedTable for tables in information schema databases. [#67187](https://github.com/ClickHouse/ClickHouse/pull/67187) ([Sergei Trifonov](https://github.com/serxa)).
* Verbose output for 03203_client_benchmark_options. [#67188](https://github.com/ClickHouse/ClickHouse/pull/67188) ([vdimir](https://github.com/vdimir)).
* Split test 02967_parallel_replicas_join_algo_and_analyzer. [#67211](https://github.com/ClickHouse/ClickHouse/pull/67211) ([Nikita Taranov](https://github.com/nickitat)).
* Fix flaky `test_pkill_query_log` (tsan). [#67223](https://github.com/ClickHouse/ClickHouse/pull/67223) ([Sergei Trifonov](https://github.com/serxa)).
* Remove integration test `test_broken_projections_in_backups_1`. [#67231](https://github.com/ClickHouse/ClickHouse/pull/67231) ([Vitaly Baranov](https://github.com/vitlibar)).
* Debug logging for [#67002](https://github.com/ClickHouse/ClickHouse/issues/67002). [#67233](https://github.com/ClickHouse/ClickHouse/pull/67233) ([Nikita Taranov](https://github.com/nickitat)).
* Fix oss-fuzz build. [#67235](https://github.com/ClickHouse/ClickHouse/pull/67235) ([Nikita Taranov](https://github.com/nickitat)).
* Fix flaky 00180_no_seek_avoiding_when_reading_from_cache. [#67236](https://github.com/ClickHouse/ClickHouse/pull/67236) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* English. [#67258](https://github.com/ClickHouse/ClickHouse/pull/67258) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove strange code. [#67260](https://github.com/ClickHouse/ClickHouse/pull/67260) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix MSan report in DatabaseReplicated. [#67262](https://github.com/ClickHouse/ClickHouse/pull/67262) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test `02310_clickhouse_local_INSERT_progress_profile_events`. [#67264](https://github.com/ClickHouse/ClickHouse/pull/67264) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove test `02982_aggregation_states_destruction`. [#67266](https://github.com/ClickHouse/ClickHouse/pull/67266) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix OOM in test runs. [#67268](https://github.com/ClickHouse/ClickHouse/pull/67268) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove harmful stuff from tests. [#67275](https://github.com/ClickHouse/ClickHouse/pull/67275) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test `03201_variant_null_map_subcolumn`. [#67276](https://github.com/ClickHouse/ClickHouse/pull/67276) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Split `01651_lc_insert_tiny_log`. [#67279](https://github.com/ClickHouse/ClickHouse/pull/67279) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Debug test `02490_benchmark_max_consecutive_errors`. [#67281](https://github.com/ClickHouse/ClickHouse/pull/67281) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad test `02833_concurrrent_sessions`. [#67282](https://github.com/ClickHouse/ClickHouse/pull/67282) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a separate test for exception handling. [#67283](https://github.com/ClickHouse/ClickHouse/pull/67283) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Debug test `01600_parts_states_metrics_long`. [#67284](https://github.com/ClickHouse/ClickHouse/pull/67284) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Faster test `02231_buffer_aggregate_states_leak`. [#67285](https://github.com/ClickHouse/ClickHouse/pull/67285) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix curiosities in `TimerDescriptor`. [#67287](https://github.com/ClickHouse/ClickHouse/pull/67287) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add retries to test `02911_backup_restore_keeper_map`. [#67290](https://github.com/ClickHouse/ClickHouse/pull/67290) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Functions: Iterate over input_rows_count where appropriate. [#67294](https://github.com/ClickHouse/ClickHouse/pull/67294) ([Robert Schulze](https://github.com/rschu1ze)).
* Add documentation for `compile_expressions`. [#67300](https://github.com/ClickHouse/ClickHouse/pull/67300) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Wrap log lines in the CI report for functional tests. [#67301](https://github.com/ClickHouse/ClickHouse/pull/67301) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad test `02050_client_profile_events`. [#67309](https://github.com/ClickHouse/ClickHouse/pull/67309) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* I do not want to think about this code. [#67312](https://github.com/ClickHouse/ClickHouse/pull/67312) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test `00940_max_parts_in_total`. [#67313](https://github.com/ClickHouse/ClickHouse/pull/67313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Tests for Kafka cannot run in parallel. [#67315](https://github.com/ClickHouse/ClickHouse/pull/67315) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#66285](https://github.com/ClickHouse/ClickHouse/issues/66285). [#67325](https://github.com/ClickHouse/ClickHouse/pull/67325) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Follow-up for [#67301](https://github.com/ClickHouse/ClickHouse/issues/67301). [#67327](https://github.com/ClickHouse/ClickHouse/pull/67327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#61659](https://github.com/ClickHouse/ClickHouse/issues/61659). [#67332](https://github.com/ClickHouse/ClickHouse/pull/67332) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix integration test `test_backup_restore_on_cluster/test_disallow_concurrency`. [#67336](https://github.com/ClickHouse/ClickHouse/pull/67336) ([Vitaly Baranov](https://github.com/vitlibar)).
* Faster and less flaky 01246_buffer_flush (by using HTTP over clickhouse-client). [#67340](https://github.com/ClickHouse/ClickHouse/pull/67340) ([Azat Khuzhin](https://github.com/azat)).
* Fix: data race in TCPHandler on socket timeouts settings. [#67341](https://github.com/ClickHouse/ClickHouse/pull/67341) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* CI: Print stdout, stderr for docker pull command. [#67343](https://github.com/ClickHouse/ClickHouse/pull/67343) ([Max K.](https://github.com/maxknv)).
* Followup [#67290](https://github.com/ClickHouse/ClickHouse/issues/67290). [#67348](https://github.com/ClickHouse/ClickHouse/pull/67348) ([vdimir](https://github.com/vdimir)).
* Skip parallel for `test_storage_kerberized_kafka`. [#67349](https://github.com/ClickHouse/ClickHouse/pull/67349) ([Andrey Zvonov](https://github.com/zvonand)).
* Don't use PeekableReadBuffer in JSONAsObject format. [#67354](https://github.com/ClickHouse/ClickHouse/pull/67354) ([Kruglov Pavel](https://github.com/Avogar)).
* This closes: [#57316](https://github.com/ClickHouse/ClickHouse/issues/57316). [#67355](https://github.com/ClickHouse/ClickHouse/pull/67355) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Remove duplicated tests. [#67357](https://github.com/ClickHouse/ClickHouse/pull/67357) ([Kruglov Pavel](https://github.com/Avogar)).
* Release branches were not detected properly, and a job that must run on a release branch could be reused from feature branches. This PR fixes the detection of release branches. [#67358](https://github.com/ClickHouse/ClickHouse/pull/67358) ([Max K.](https://github.com/maxknv)).
* Disable some Dynamic tests under sanitizers, rewrite 03202_dynamic_null_map_subcolumn to sql. [#67359](https://github.com/ClickHouse/ClickHouse/pull/67359) ([Kruglov Pavel](https://github.com/Avogar)).
* Add no-distributed-cache tag in tests. [#67361](https://github.com/ClickHouse/ClickHouse/pull/67361) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Trying to fix test_cache_evicted_by_temporary_data and print debug info. [#67362](https://github.com/ClickHouse/ClickHouse/pull/67362) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Try to fix: ALL_CONNECTION_TRIES_FAILED with parallel replicas. [#67389](https://github.com/ClickHouse/ClickHouse/pull/67389) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Fix bad test `01036_no_superfluous_dict_reload_on_create_database`. [#67390](https://github.com/ClickHouse/ClickHouse/pull/67390) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Adding `SelectedPartsTotal` and `SelectedMarksTotal` as new ProfileEvents. [#67393](https://github.com/ClickHouse/ClickHouse/pull/67393) ([Jordi Villar](https://github.com/jrdi)).
|
||||||
|
* Print debug info in `test_storage_s3_queue/test.py::test_shards_distributed`. [#67394](https://github.com/ClickHouse/ClickHouse/pull/67394) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Mute degraded perf test. [#67396](https://github.com/ClickHouse/ClickHouse/pull/67396) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Debug TimerDescriptor. [#67397](https://github.com/ClickHouse/ClickHouse/pull/67397) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove has_single_port property from plan stream. [#67398](https://github.com/ClickHouse/ClickHouse/pull/67398) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix typo. [#67400](https://github.com/ClickHouse/ClickHouse/pull/67400) ([Halersson Paris](https://github.com/halersson)).
|
||||||
|
* CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Disable 02932_refreshable_materialized_views. [#67404](https://github.com/ClickHouse/ClickHouse/pull/67404) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Follow-up to [#67294](https://github.com/ClickHouse/ClickHouse/issues/67294). [#67405](https://github.com/ClickHouse/ClickHouse/pull/67405) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix DWARF range list parsing in stack symbolizer. [#67417](https://github.com/ClickHouse/ClickHouse/pull/67417) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Make Dwarf::findAddress() fallback slow path less slow. [#67418](https://github.com/ClickHouse/ClickHouse/pull/67418) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Fix inconsistent formatting of CODEC and STATISTICS. [#67421](https://github.com/ClickHouse/ClickHouse/pull/67421) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Reduced complexity of the test 02832_alter_max_sessions_for_user. [#67425](https://github.com/ClickHouse/ClickHouse/pull/67425) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||||
|
* Remove obsolete `--multiquery` parameter from tests. [#67435](https://github.com/ClickHouse/ClickHouse/pull/67435) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix public backports. [#67439](https://github.com/ClickHouse/ClickHouse/pull/67439) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Bump Azure from v1.12 to v1.13. [#67446](https://github.com/ClickHouse/ClickHouse/pull/67446) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* 24.7 add missing documentation and testing. [#67454](https://github.com/ClickHouse/ClickHouse/pull/67454) ([Nikita Fomichev](https://github.com/fm4v)).
|
||||||
|
* Use correct order of fields in `StorageURLSource`. [#67455](https://github.com/ClickHouse/ClickHouse/pull/67455) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* run 01171 test in parallel. [#67470](https://github.com/ClickHouse/ClickHouse/pull/67470) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* [Green CI] Fix WriteBuffer destructor when finalize has failed for MergeTreeDeduplicationLog::shutdown. [#67474](https://github.com/ClickHouse/ClickHouse/pull/67474) ([Alexey Katsman](https://github.com/alexkats)).
|
||||||
|
* Reduce 02473_multistep_prewhere run time. [#67475](https://github.com/ClickHouse/ClickHouse/pull/67475) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Update version_date.tsv and changelogs after v24.7.1.2915-stable. [#67483](https://github.com/ClickHouse/ClickHouse/pull/67483) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Revert [#61750](https://github.com/ClickHouse/ClickHouse/issues/61750) "Improve JSONEachRow reading by ignoring the keys case". [#67484](https://github.com/ClickHouse/ClickHouse/pull/67484) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Disable parallel run for `01923_network_receive_time_metric_insert.sh`. [#67492](https://github.com/ClickHouse/ClickHouse/pull/67492) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||||
|
* Fix test `test_backup_restore_on_cluster/test.py::test_mutation`. [#67494](https://github.com/ClickHouse/ClickHouse/pull/67494) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* [Green CI] Fix potentially flaky test_mask_sensitive_info integration test. [#67506](https://github.com/ClickHouse/ClickHouse/pull/67506) ([Alexey Katsman](https://github.com/alexkats)).
|
||||||
|
* [Green CI] Test `test_storage_azure_blob_storage/test.py` is flaky. [#67512](https://github.com/ClickHouse/ClickHouse/pull/67512) ([Daniil Ivanik](https://github.com/divanik)).
|
||||||
|
* Prepare Release workflow for production. [#67523](https://github.com/ClickHouse/ClickHouse/pull/67523) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Fix upgrade check. [#67524](https://github.com/ClickHouse/ClickHouse/pull/67524) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* [Green CI] test 03164_s3_settings_for_queries_and_merges is flaky. [#67535](https://github.com/ClickHouse/ClickHouse/pull/67535) ([Daniil Ivanik](https://github.com/divanik)).
|
||||||
|
* Log message and increased concurrency for table removal. [#67537](https://github.com/ClickHouse/ClickHouse/pull/67537) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix AsyncLoader destruction race. [#67553](https://github.com/ClickHouse/ClickHouse/pull/67553) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Add an assert into TimerDescriptor. [#67555](https://github.com/ClickHouse/ClickHouse/pull/67555) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Re-enable ICU on s390/x. [#67557](https://github.com/ClickHouse/ClickHouse/pull/67557) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Update version_date.tsv and changelogs after v24.4.4.107-stable. [#67559](https://github.com/ClickHouse/ClickHouse/pull/67559) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Sometimes we fail with timeout in stateless tests and the reason for that seems to be in `stop_logs_replication` step. Add a check for timeout here. [#67560](https://github.com/ClickHouse/ClickHouse/pull/67560) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Miscellaneous. [#67564](https://github.com/ClickHouse/ClickHouse/pull/67564) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* enable parallel_view_processing in perf tests. [#67565](https://github.com/ClickHouse/ClickHouse/pull/67565) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Fix flaky `test_system_kafka_consumers_rebalance`. [#67566](https://github.com/ClickHouse/ClickHouse/pull/67566) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Update version_date.tsv and changelogs after v24.7.2.13-stable. [#67586](https://github.com/ClickHouse/ClickHouse/pull/67586) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix 01811_storage_buffer_flush_parameters flakiness. [#67589](https://github.com/ClickHouse/ClickHouse/pull/67589) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix test_zookeeper_config_load_balancing after adding the xdist worker name to the instance. [#67590](https://github.com/ClickHouse/ClickHouse/pull/67590) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||||
|
* Update minio in integration tests. [#67595](https://github.com/ClickHouse/ClickHouse/pull/67595) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* added tests for page index in parquet files. [#67596](https://github.com/ClickHouse/ClickHouse/pull/67596) ([max-vostrikov](https://github.com/max-vostrikov)).
|
||||||
|
* Update check_rabbitmq_is_available. [#67597](https://github.com/ClickHouse/ClickHouse/pull/67597) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix 02434_cancel_insert_when_client_dies. [#67600](https://github.com/ClickHouse/ClickHouse/pull/67600) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix 02910_bad_logs_level_in_local in fast tests. [#67603](https://github.com/ClickHouse/ClickHouse/pull/67603) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix 01605_adaptive_granularity_block_borders. [#67605](https://github.com/ClickHouse/ClickHouse/pull/67605) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Update CHANGELOG.md. [#67607](https://github.com/ClickHouse/ClickHouse/pull/67607) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove some `no-parallel` tags from tests. [#67610](https://github.com/ClickHouse/ClickHouse/pull/67610) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Update README.md. [#67613](https://github.com/ClickHouse/ClickHouse/pull/67613) ([Tyler Hannan](https://github.com/tylerhannan)).
|
||||||
|
* Try fix 03143_asof_join_ddb_long. [#67620](https://github.com/ClickHouse/ClickHouse/pull/67620) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Don't run ASAN unit tests under gdb. [#67622](https://github.com/ClickHouse/ClickHouse/pull/67622) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix crash in KeyCondition::cloneASTWithInversionPushDown() caused by type change. [#67641](https://github.com/ClickHouse/ClickHouse/pull/67641) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Fix race condition between ProcessList and Settings. [#67645](https://github.com/ClickHouse/ClickHouse/pull/67645) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* `02481_async_insert_race_long.sh` flakiness fixes. [#67650](https://github.com/ClickHouse/ClickHouse/pull/67650) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||||
|
* Fixes [#67651](https://github.com/ClickHouse/ClickHouse/issues/67651). [#67653](https://github.com/ClickHouse/ClickHouse/pull/67653) ([pufit](https://github.com/pufit)).
|
||||||
|
* Fix flaky `test_replicated_table_attach`. [#67658](https://github.com/ClickHouse/ClickHouse/pull/67658) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Update version_date.tsv and changelogs after v24.4.4.113-stable. [#67659](https://github.com/ClickHouse/ClickHouse/pull/67659) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Even better healthcheck for ldap. [#67667](https://github.com/ClickHouse/ClickHouse/pull/67667) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* Fix 03203_client_benchmark_options. [#67671](https://github.com/ClickHouse/ClickHouse/pull/67671) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Integration tests: fix ports clashing problem. [#67672](https://github.com/ClickHouse/ClickHouse/pull/67672) ([Nikita Fomichev](https://github.com/fm4v)).
|
||||||
|
* Remove some `no-parallel` tags from tests (Part 2). [#67673](https://github.com/ClickHouse/ClickHouse/pull/67673) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Use FunctionArgumentDescriptors for bitSlice. [#67674](https://github.com/ClickHouse/ClickHouse/pull/67674) ([Lennard Eijsackers](https://github.com/Blokje5)).
|
||||||
|
* Update version_date.tsv and changelog after v24.3.6.48-lts. [#67677](https://github.com/ClickHouse/ClickHouse/pull/67677) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Avoid ddl queue timeout in 02313_filesystem_cache_seeks. [#67680](https://github.com/ClickHouse/ClickHouse/pull/67680) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Fix bad log message in sort description. [#67690](https://github.com/ClickHouse/ClickHouse/pull/67690) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Update version_date.tsv and changelog after v23.8.16.40-lts. [#67692](https://github.com/ClickHouse/ClickHouse/pull/67692) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix check names in test reports and the CI Logs database. [#67696](https://github.com/ClickHouse/ClickHouse/pull/67696) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* CI: Minor refactoring in ci_utils. [#67706](https://github.com/ClickHouse/ClickHouse/pull/67706) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Fix 01042_system_reload_dictionary_reloads_completely flakiness. [#67719](https://github.com/ClickHouse/ClickHouse/pull/67719) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix test `00002_log_and_exception_messages_formatting`. [#67723](https://github.com/ClickHouse/ClickHouse/pull/67723) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test `02789_reading_from_s3_with_connection_pool`. [#67726](https://github.com/ClickHouse/ClickHouse/pull/67726) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix strange code in HostResolvePool. [#67727](https://github.com/ClickHouse/ClickHouse/pull/67727) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix a typo. [#67729](https://github.com/ClickHouse/ClickHouse/pull/67729) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Smart handling of processes leftovers in tests. [#67737](https://github.com/ClickHouse/ClickHouse/pull/67737) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix test retries. [#67738](https://github.com/ClickHouse/ClickHouse/pull/67738) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fill only selected columns from system.clusters. [#67739](https://github.com/ClickHouse/ClickHouse/pull/67739) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Bump NuRaft (to properly catch thread exceptions). [#67740](https://github.com/ClickHouse/ClickHouse/pull/67740) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Try to fix RabbitMQ test failures. [#67743](https://github.com/ClickHouse/ClickHouse/pull/67743) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Stateless tests: attempt to fix timeouts of `02473_multistep_prewhere* 00411_long_accurate_number_comparison*`. [#67746](https://github.com/ClickHouse/ClickHouse/pull/67746) ([Nikita Fomichev](https://github.com/fm4v)).
|
||||||
|
* Fix test_ttl_move::test_alter_with_merge_work flakiness. [#67747](https://github.com/ClickHouse/ClickHouse/pull/67747) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* ci: better stateless runner (correctly collect artifacts and also some basic errors capturing). [#67752](https://github.com/ClickHouse/ClickHouse/pull/67752) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Introduce `no-flaky-check` tag. [#67755](https://github.com/ClickHouse/ClickHouse/pull/67755) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Whitespaces. [#67771](https://github.com/ClickHouse/ClickHouse/pull/67771) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* [RFC] Print original query for AST formatting check on CI. [#67776](https://github.com/ClickHouse/ClickHouse/pull/67776) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix test `02833_concurrent_sessions`, Fix test `02835_drop_user_during_session`. [#67779](https://github.com/ClickHouse/ClickHouse/pull/67779) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix diagnostics in the test script. [#67780](https://github.com/ClickHouse/ClickHouse/pull/67780) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test `02231_bloom_filter_sizing`. [#67784](https://github.com/ClickHouse/ClickHouse/pull/67784) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed session_log related tests race condition on logout. [#67785](https://github.com/ClickHouse/ClickHouse/pull/67785) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||||
|
* ci/stateless: fix artifacts post-processing and detect if something failed there. [#67791](https://github.com/ClickHouse/ClickHouse/pull/67791) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Integration tests: fix flaky `test_dictionaries_update_and_reload::test_reload_after_fail_by_timer`. [#67793](https://github.com/ClickHouse/ClickHouse/pull/67793) ([Nikita Fomichev](https://github.com/fm4v)).
|
||||||
|
* Fix possible CANNOT_READ_ALL_DATA during server startup in performance tests. [#67795](https://github.com/ClickHouse/ClickHouse/pull/67795) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Reduce table size in 03037_dynamic_merges_2* tests. [#67797](https://github.com/ClickHouse/ClickHouse/pull/67797) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Disable 03038_nested_dynamic_merges* under sanitizers because it's too slow. [#67798](https://github.com/ClickHouse/ClickHouse/pull/67798) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Revert "Merge pull request [#66510](https://github.com/ClickHouse/ClickHouse/issues/66510) from canhld94/fix_trivial_count_non_deterministic_func". [#67800](https://github.com/ClickHouse/ClickHouse/pull/67800) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Update comment. [#67801](https://github.com/ClickHouse/ClickHouse/pull/67801) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix bad test `03032_redundant_equals`. [#67822](https://github.com/ClickHouse/ClickHouse/pull/67822) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Update tags for a few tests. [#67829](https://github.com/ClickHouse/ClickHouse/pull/67829) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Add debug logging for window view tests. [#67841](https://github.com/ClickHouse/ClickHouse/pull/67841) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Closes [#67621](https://github.com/ClickHouse/ClickHouse/issues/67621). [#67843](https://github.com/ClickHouse/ClickHouse/pull/67843) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Fix query cache randomization in stress tests. [#67855](https://github.com/ClickHouse/ClickHouse/pull/67855) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Update version_date.tsv and changelogs after v24.5.5.78-stable. [#67863](https://github.com/ClickHouse/ClickHouse/pull/67863) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Un-flake 02524_fuzz_and_fuss_2. [#67867](https://github.com/ClickHouse/ClickHouse/pull/67867) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Misc fixes. [#67869](https://github.com/ClickHouse/ClickHouse/pull/67869) ([Alexey Katsman](https://github.com/alexkats)).
|
||||||
|
* Fixes [#67444](https://github.com/ClickHouse/ClickHouse/issues/67444). [#67873](https://github.com/ClickHouse/ClickHouse/pull/67873) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||||
|
* no-msan 00314_sample_factor_virtual_column. [#67874](https://github.com/ClickHouse/ClickHouse/pull/67874) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Revert "Revert "Add a test for [#47892](https://github.com/ClickHouse/ClickHouse/issues/47892)"". [#67877](https://github.com/ClickHouse/ClickHouse/pull/67877) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Lazily create invalid files in S3. [#67882](https://github.com/ClickHouse/ClickHouse/pull/67882) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Do not try to create azure container if not needed. [#67896](https://github.com/ClickHouse/ClickHouse/pull/67896) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* CI: Fix for setting Mergeable Check from sync. [#67898](https://github.com/ClickHouse/ClickHouse/pull/67898) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Bump rocksdb from v8.10 to v9.4 + enable jemalloc and liburing. [#67904](https://github.com/ClickHouse/ClickHouse/pull/67904) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Update version_date.tsv and changelogs after v24.6.3.95-stable. [#67910](https://github.com/ClickHouse/ClickHouse/pull/67910) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Remove some no-parallel tags from tests (Part 3). [#67914](https://github.com/ClickHouse/ClickHouse/pull/67914) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Follow up [#67235](https://github.com/ClickHouse/ClickHouse/issues/67235). [#67917](https://github.com/ClickHouse/ClickHouse/pull/67917) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* CI: Changelog: Critical Bug Fix to Bug Fix. [#67919](https://github.com/ClickHouse/ClickHouse/pull/67919) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* CI: Multi-channel CiBuddy. [#67923](https://github.com/ClickHouse/ClickHouse/pull/67923) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* more logs to debug logical error from async inserts. [#67928](https://github.com/ClickHouse/ClickHouse/pull/67928) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
|
* Fix stress test error with TDigest statistics. [#67930](https://github.com/ClickHouse/ClickHouse/pull/67930) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Remove some no-parallel tags from tests (Part 4). [#67932](https://github.com/ClickHouse/ClickHouse/pull/67932) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Upgrade QPL to v1.6.0. [#67933](https://github.com/ClickHouse/ClickHouse/pull/67933) ([Maria Zhukova](https://github.com/mzhukova)).
|
||||||
|
* CI: Strict job timeout 1.5h for tests, 2h for builds. [#67934](https://github.com/ClickHouse/ClickHouse/pull/67934) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Remove slow tests from fasttest check. [#67941](https://github.com/ClickHouse/ClickHouse/pull/67941) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix memory corruption in usearch. [#67942](https://github.com/ClickHouse/ClickHouse/pull/67942) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Backported in [#68547](https://github.com/ClickHouse/ClickHouse/issues/68547): Timeout handling for functional and integration tests, store artifacts and report if timed out - sets 2h default timeout for all jobs. [#67944](https://github.com/ClickHouse/ClickHouse/pull/67944) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Unflake 02099_tsv_raw_format.sh. [#67947](https://github.com/ClickHouse/ClickHouse/pull/67947) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* This closes: [#67866](https://github.com/ClickHouse/ClickHouse/issues/67866). [#67950](https://github.com/ClickHouse/ClickHouse/pull/67950) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Change log level of an insignificant message in clickhouse-local. [#67952](https://github.com/ClickHouse/ClickHouse/pull/67952) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
|
||||||
|
* Fix flaky `test_storage_s3_queue/test.py::test_multiple_tables_streaming_sync_distributed`. [#67959](https://github.com/ClickHouse/ClickHouse/pull/67959) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||||
|
* tests: fix 03002_part_log_rmt_fetch_merge_error flakiness. [#67960](https://github.com/ClickHouse/ClickHouse/pull/67960) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix timeout of 02310_clickhouse_local_INSERT_progress_profile_events. [#67961](https://github.com/ClickHouse/ClickHouse/pull/67961) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Remove obsolete `--multiquery` parameter (follow-up to [#63898](https://github.com/ClickHouse/ClickHouse/issues/63898)), pt. III. [#67964](https://github.com/ClickHouse/ClickHouse/pull/67964) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Update minio in stateless tests. [#67975](https://github.com/ClickHouse/ClickHouse/pull/67975) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* CI: Integration tests uncover some logging. [#67978](https://github.com/ClickHouse/ClickHouse/pull/67978) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Fix 03130_convert_outer_join_to_inner_join. [#67980](https://github.com/ClickHouse/ClickHouse/pull/67980) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Collect minio audit logs in stateless tests. [#67998](https://github.com/ClickHouse/ClickHouse/pull/67998) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Remove some no-parallel tags from tests (Part 5). [#68002](https://github.com/ClickHouse/ClickHouse/pull/68002) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Minor fixes in tables.md. [#68004](https://github.com/ClickHouse/ClickHouse/pull/68004) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Follow up for [#67843](https://github.com/ClickHouse/ClickHouse/issues/67843). [#68007](https://github.com/ClickHouse/ClickHouse/pull/68007) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Remove unused CLI option. [#68008](https://github.com/ClickHouse/ClickHouse/pull/68008) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test `02845_threads_count_in_distributed_queries`. [#68011](https://github.com/ClickHouse/ClickHouse/pull/68011) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* CI: Pass job timeout into tests config. [#68013](https://github.com/ClickHouse/ClickHouse/pull/68013) ([Nikita Fomichev](https://github.com/fm4v)).
|
||||||
|
* Add a test for [#57420](https://github.com/ClickHouse/ClickHouse/issues/57420). [#68017](https://github.com/ClickHouse/ClickHouse/pull/68017) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Revert "Revert "Bump rocksdb from v8.10 to v9.4 + enable jemalloc and liburing"". [#68021](https://github.com/ClickHouse/ClickHouse/pull/68021) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* CI: Fix for filtering jobs in PRs. [#68022](https://github.com/ClickHouse/ClickHouse/pull/68022) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Docs: Update 3rd party library guide. [#68027](https://github.com/ClickHouse/ClickHouse/pull/68027) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Refactor tests for (experimental) statistics. [#68034](https://github.com/ClickHouse/ClickHouse/pull/68034) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Split `00284_external_aggregation.sql`. [#68037](https://github.com/ClickHouse/ClickHouse/pull/68037) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Update version_date.tsv and changelog after v24.7.3.42-stable. [#68045](https://github.com/ClickHouse/ClickHouse/pull/68045) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update test_drop_is_lock_free/test.py. [#68051](https://github.com/ClickHouse/ClickHouse/pull/68051) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fixes [#67865](https://github.com/ClickHouse/ClickHouse/issues/67865). [#68054](https://github.com/ClickHouse/ClickHouse/pull/68054) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||||
|
* Disable randomization of `trace_profile_events` in clickhouse-test. [#68058](https://github.com/ClickHouse/ClickHouse/pull/68058) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Minor CMake cleanup. [#68069](https://github.com/ClickHouse/ClickHouse/pull/68069) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* If the test cluster is overloaded, sometimes simple query execution [can take more time](https://pastila.nl/?00224e71/f017cd6675b52ccc205c81aa62a47de5#8dB4+C4MOdOi3NLV1dc0Fg==) than `Buffer`'s max time to flush. This PR doubles the timeout and allows to skip the check in case of significant latency. [#68072](https://github.com/ClickHouse/ClickHouse/pull/68072) ([pufit](https://github.com/pufit)).
|
||||||
|
* Fix flaky `02675_profile_events_from_query_log_and_client`. [#68097](https://github.com/ClickHouse/ClickHouse/pull/68097) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix race in `WithRetries`. [#68106](https://github.com/ClickHouse/ClickHouse/pull/68106) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Add empty cell to reports when time is missing. [#68112](https://github.com/ClickHouse/ClickHouse/pull/68112) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Fix test `00900_long_parquet_load`. [#68130](https://github.com/ClickHouse/ClickHouse/pull/68130) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* tests: fix 01246_buffer_flush flakiness due to slow trace_log flush. [#68134](https://github.com/ClickHouse/ClickHouse/pull/68134) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Only use Field::safeGet - Field::get prone to type punning. [#68135](https://github.com/ClickHouse/ClickHouse/pull/68135) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* tests: attempt to fix 01600_parts_states_metrics_long (by forbid parallel run). [#68136](https://github.com/ClickHouse/ClickHouse/pull/68136) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix01710 Timeout. [#68138](https://github.com/ClickHouse/ClickHouse/pull/68138) ([jsc0218](https://github.com/jsc0218)).
|
||||||
|
* Remove the extra cell from reports when it is not necessary. [#68145](https://github.com/ClickHouse/ClickHouse/pull/68145) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Remove "Processing configuration file" message from clickhouse-local. [#68157](https://github.com/ClickHouse/ClickHouse/pull/68157) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* tests: fix 02122_join_group_by_timeout flakiness. [#68160](https://github.com/ClickHouse/ClickHouse/pull/68160) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix `test_cluster_all_replicas`. [#68178](https://github.com/ClickHouse/ClickHouse/pull/68178) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix leftovers. [#68181](https://github.com/ClickHouse/ClickHouse/pull/68181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test `01172_transaction_counters`. [#68182](https://github.com/ClickHouse/ClickHouse/pull/68182) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Refactor tests for (experimental) statistics. [#68186](https://github.com/ClickHouse/ClickHouse/pull/68186) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Remove Log engine from Kafka integration tests. [#68200](https://github.com/ClickHouse/ClickHouse/pull/68200) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* [Green CI] Fix test 01903_correct_block_size_prediction_with_default. [#68203](https://github.com/ClickHouse/ClickHouse/pull/68203) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||||
|
* Replace segfault in Replicated database with logical error. [#68250](https://github.com/ClickHouse/ClickHouse/pull/68250) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Backported in [#68423](https://github.com/ClickHouse/ClickHouse/issues/68423): tests: make 01600_parts_states_metrics_long better. [#68265](https://github.com/ClickHouse/ClickHouse/pull/68265) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Backported in [#68374](https://github.com/ClickHouse/ClickHouse/issues/68374): Rename: S3DiskNoKeyErrors -> DiskS3NoSuchKeyErrors. [#68361](https://github.com/ClickHouse/ClickHouse/pull/68361) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
|
||||||
|
* Backported in [#68637](https://github.com/ClickHouse/ClickHouse/issues/68637): Check for invalid regexp in JSON SKIP REGEXP section. [#68451](https://github.com/ClickHouse/ClickHouse/pull/68451) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Backported in [#68485](https://github.com/ClickHouse/ClickHouse/issues/68485): Better inference of date times 2. [#68452](https://github.com/ClickHouse/ClickHouse/pull/68452) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Backported in [#68540](https://github.com/ClickHouse/ClickHouse/issues/68540): CI: Native build for package_aarch64. [#68457](https://github.com/ClickHouse/ClickHouse/pull/68457) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Backported in [#68518](https://github.com/ClickHouse/ClickHouse/issues/68518): Minor update in Dynamic/JSON serializations. [#68459](https://github.com/ClickHouse/ClickHouse/pull/68459) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Backported in [#68558](https://github.com/ClickHouse/ClickHouse/issues/68558): CI: Minor release workflow fix. [#68536](https://github.com/ClickHouse/ClickHouse/pull/68536) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Backported in [#68576](https://github.com/ClickHouse/ClickHouse/issues/68576): CI: Tidy build timeout from 2h to 3h. [#68567](https://github.com/ClickHouse/ClickHouse/pull/68567) ([Max K.](https://github.com/maxknv)).
|
||||||
|
|
12
docs/changelogs/v24.8.2.3-lts.md
Normal file
12
docs/changelogs/v24.8.2.3-lts.md
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2024
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2024 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v24.8.2.3-lts (b54f79ed323) FIXME as compared to v24.8.1.2684-lts (161c62fd295)
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
* Backported in [#68670](https://github.com/ClickHouse/ClickHouse/issues/68670): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
|
@ -91,6 +91,28 @@ SELECT 1
|
|||||||
In addition to the above settings, you can use `USE_*` flags from `system.build_options` to define usage of particular ClickHouse features.
|
In addition to the above settings, you can use `USE_*` flags from `system.build_options` to define usage of particular ClickHouse features.
|
||||||
For example, if your test uses a MySQL table, you should add a tag `use-mysql`.
|
For example, if your test uses a MySQL table, you should add a tag `use-mysql`.
|
||||||
|
|
||||||
|
### Specifying limits for random settings
|
||||||
|
|
||||||
|
A test can specify minimum and maximum allowed values for settings that can be randomized during a test run.
|
||||||
|
|
||||||
|
For `.sh` tests, limits are written as a comment on the line after the tags, or on the second line if no tags are specified:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# Tags: no-fasttest
|
||||||
|
# Random settings limits: max_block_size=(1000, 10000); index_granularity=(100, None)
|
||||||
|
```
|
||||||
|
|
||||||
|
For `.sql` tests, limits are written as a SQL comment on the line after the tags, or on the first line if no tags are specified:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Tags: no-fasttest
|
||||||
|
-- Random settings limits: max_block_size=(1000, 10000); index_granularity=(100, None)
|
||||||
|
SELECT 1
|
||||||
|
```
|
||||||
|
|
||||||
|
If you need to specify only one limit, you can use `None` for the other one.
|
||||||
|
|
||||||
### Choosing the Test Name
|
### Choosing the Test Name
|
||||||
|
|
||||||
The name of the test starts with a five-digit prefix followed by a descriptive name, such as `00422_hash_function_constexpr.sql`. To choose the prefix, find the largest prefix already present in the directory, and increment it by one. In the meantime, some other tests might be added with the same numeric prefix, but this is OK and does not lead to any problems, you don't have to change it later.
|
The name of the test starts with a five-digit prefix followed by a descriptive name, such as `00422_hash_function_constexpr.sql`. To choose the prefix, find the largest prefix already present in the directory, and increment it by one. In the meantime, some other tests might be added with the same numeric prefix, but this is OK and does not lead to any problems, you don't have to change it later.
|
||||||
|
@ -240,7 +240,7 @@ libhdfs3 support HDFS namenode HA.
|
|||||||
## Storage Settings {#storage-settings}
|
## Storage Settings {#storage-settings}
|
||||||
|
|
||||||
- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
|
- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
|
||||||
- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
|
- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
|
||||||
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
||||||
|
|
||||||
**See Also**
|
**See Also**
|
||||||
|
@ -6,28 +6,34 @@ sidebar_label: Iceberg
|
|||||||
|
|
||||||
# Iceberg Table Engine
|
# Iceberg Table Engine
|
||||||
|
|
||||||
This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3.
|
This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, in Azure, and on local storage.
|
||||||
|
|
||||||
## Create Table
|
## Create Table
|
||||||
|
|
||||||
Note that the Iceberg table must already exist in S3, this command does not take DDL parameters to create a new table.
|
Note that the Iceberg table must already exist in the storage; this command does not take DDL parameters to create a new table.
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE TABLE iceberg_table
|
CREATE TABLE iceberg_table_s3
|
||||||
ENGINE = Iceberg(url, [aws_access_key_id, aws_secret_access_key,])
|
ENGINE = IcebergS3(url, [, NOSIGN | access_key_id, secret_access_key, [session_token]], format, [,compression])
|
||||||
|
|
||||||
|
CREATE TABLE iceberg_table_azure
|
||||||
|
ENGINE = IcebergAzure(connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression])
|
||||||
|
|
||||||
|
CREATE TABLE iceberg_table_local
|
||||||
|
ENGINE = IcebergLocal(path_to_table, [,format] [,compression_method])
|
||||||
```
|
```
|
||||||
|
|
||||||
**Engine parameters**
|
**Engine arguments**
|
||||||
|
|
||||||
- `url` — url with the path to an existing Iceberg table.
|
The arguments have the same meaning as the corresponding arguments of the `S3`, `AzureBlobStorage`, and `File` engines, respectively.
|
||||||
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file.
|
`format` stands for the format of data files in the Iceberg table.
|
||||||
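For illustration, a locally stored Iceberg table could be attached like this (a minimal sketch; the path is hypothetical and the explicit `'Parquet'` format argument is shown only as an example):

```sql
CREATE TABLE iceberg_table_local_example
ENGINE = IcebergLocal('/var/lib/clickhouse/user_files/iceberg_data/', 'Parquet')
```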
|
|
||||||
Engine parameters can be specified using [Named Collections](../../../operations/named-collections.md)
|
Engine parameters can be specified using [Named Collections](../../../operations/named-collections.md)
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE TABLE iceberg_table ENGINE=Iceberg('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
|
CREATE TABLE iceberg_table ENGINE=IcebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
|
||||||
```
|
```
|
||||||
|
|
||||||
Using named collections:
|
Using named collections:
|
||||||
@ -45,9 +51,15 @@ Using named collections:
|
|||||||
```
|
```
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE TABLE iceberg_table ENGINE=Iceberg(iceberg_conf, filename = 'test_table')
|
CREATE TABLE iceberg_table ENGINE=IcebergS3(iceberg_conf, filename = 'test_table')
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Aliases**
|
||||||
|
|
||||||
|
|
||||||
|
The `Iceberg` table engine is now an alias for `IcebergS3`.
|
||||||
|
|
||||||
## See also
|
## See also
|
||||||
|
|
||||||
- [iceberg table function](/docs/en/sql-reference/table-functions/iceberg.md)
|
- [iceberg table function](/docs/en/sql-reference/table-functions/iceberg.md)
|
||||||
|
@ -225,7 +225,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
|
|||||||
## Storage Settings {#storage-settings}
|
## Storage Settings {#storage-settings}
|
||||||
|
|
||||||
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
|
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
|
||||||
- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
|
- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
|
||||||
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
||||||
|
|
||||||
## S3-related Settings {#settings}
|
## S3-related Settings {#settings}
|
||||||
|
@ -54,7 +54,7 @@ Parameters:
|
|||||||
- `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a
|
- `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a
|
||||||
line between two points in Euclidean space), or `cosineDistance` (the [cosine
|
line between two points in Euclidean space), or `cosineDistance` (the [cosine
|
||||||
distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance)- the angle between two non-zero vectors).
|
distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance)- the angle between two non-zero vectors).
|
||||||
- `quantization`: either `f32`, `f16`, or `i8` for storing the vector with reduced precision (optional, default: `f32`)
|
- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing the vector with reduced precision (optional, default: `bf16`)
|
||||||
- `m`: the number of neighbors per graph node (optional, default: 16)
|
- `m`: the number of neighbors per graph node (optional, default: 16)
|
||||||
- `ef_construction`: (optional, default: 128)
|
- `ef_construction`: (optional, default: 128)
|
||||||
- `ef_search`: (optional, default: 64)
|
- `ef_search`: (optional, default: 64)
|
||||||
|
@ -80,7 +80,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
|
|||||||
`PRIMARY KEY` — The primary key if it [differs from the sorting key](#choosing-a-primary-key-that-differs-from-the-sorting-key). Optional.
|
`PRIMARY KEY` — The primary key if it [differs from the sorting key](#choosing-a-primary-key-that-differs-from-the-sorting-key). Optional.
|
||||||
|
|
||||||
Specifying a sorting key (using `ORDER BY` clause) implicitly specifies a primary key.
|
Specifying a sorting key (using `ORDER BY` clause) implicitly specifies a primary key.
|
||||||
It is usually not necessary to specify the primary key in addition to the primary key.
|
It is usually not necessary to specify the primary key in addition to the sorting key.
|
||||||
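For example, a sketch of a table whose primary key is a prefix of its sorting key (table and column names are illustrative):

```sql
CREATE TABLE example_events
(
    id UInt64,
    ts DateTime,
    payload String
)
ENGINE = MergeTree
ORDER BY (id, ts)   -- sorting key
PRIMARY KEY id      -- optional; must be a prefix of the sorting key
```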
|
|
||||||
#### SAMPLE BY
|
#### SAMPLE BY
|
||||||
|
|
||||||
|
@ -109,6 +109,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
|
|||||||
- `_file` — Resource name of the `URL`. Type: `LowCardinalty(String)`.
|
- `_file` — Resource name of the `URL`. Type: `LowCardinalty(String)`.
|
||||||
- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
|
- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
|
||||||
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
|
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
|
||||||
|
- `_headers` - HTTP response headers. Type: `Map(LowCardinality(String), LowCardinality(String))`.
|
||||||
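As an example, the `_headers` column can be queried directly (a sketch assuming the `url` table function exposes the same virtual columns; the URL is a placeholder):

```sql
SELECT _headers['Content-Type'] AS content_type
FROM url('https://example.com/data.json', JSONEachRow)
LIMIT 1
```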
|
|
||||||
## Storage Settings {#storage-settings}
|
## Storage Settings {#storage-settings}
|
||||||
|
|
||||||
|
@ -58,7 +58,7 @@ Connection: Close
|
|||||||
Content-Type: text/tab-separated-values; charset=UTF-8
|
Content-Type: text/tab-separated-values; charset=UTF-8
|
||||||
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
|
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
|
||||||
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
|
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
|
||||||
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds": "0"}
|
||||||
|
|
||||||
1
|
1
|
||||||
```
|
```
|
||||||
@ -472,7 +472,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
|
|||||||
< X-ClickHouse-Format: Template
|
< X-ClickHouse-Format: Template
|
||||||
< X-ClickHouse-Timezone: Asia/Shanghai
|
< X-ClickHouse-Timezone: Asia/Shanghai
|
||||||
< Keep-Alive: timeout=10
|
< Keep-Alive: timeout=10
|
||||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
|
||||||
<
|
<
|
||||||
# HELP "Query" "Number of executing queries"
|
# HELP "Query" "Number of executing queries"
|
||||||
# TYPE "Query" counter
|
# TYPE "Query" counter
|
||||||
@ -668,7 +668,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
|
|||||||
< Content-Type: text/html; charset=UTF-8
|
< Content-Type: text/html; charset=UTF-8
|
||||||
< Transfer-Encoding: chunked
|
< Transfer-Encoding: chunked
|
||||||
< Keep-Alive: timeout=10
|
< Keep-Alive: timeout=10
|
||||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
|
||||||
<
|
<
|
||||||
* Connection #0 to host localhost left intact
|
* Connection #0 to host localhost left intact
|
||||||
Say Hi!%
|
Say Hi!%
|
||||||
@ -708,7 +708,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
|
|||||||
< Content-Type: text/plain; charset=UTF-8
|
< Content-Type: text/plain; charset=UTF-8
|
||||||
< Transfer-Encoding: chunked
|
< Transfer-Encoding: chunked
|
||||||
< Keep-Alive: timeout=10
|
< Keep-Alive: timeout=10
|
||||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
|
||||||
<
|
<
|
||||||
* Connection #0 to host localhost left intact
|
* Connection #0 to host localhost left intact
|
||||||
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
|
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
|
||||||
@ -766,7 +766,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
|
|||||||
< Content-Type: text/html; charset=UTF-8
|
< Content-Type: text/html; charset=UTF-8
|
||||||
< Transfer-Encoding: chunked
|
< Transfer-Encoding: chunked
|
||||||
< Keep-Alive: timeout=10
|
< Keep-Alive: timeout=10
|
||||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
|
||||||
<
|
<
|
||||||
<html><body>Absolute Path File</body></html>
|
<html><body>Absolute Path File</body></html>
|
||||||
* Connection #0 to host localhost left intact
|
* Connection #0 to host localhost left intact
|
||||||
@ -785,7 +785,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
|
|||||||
< Content-Type: text/html; charset=UTF-8
|
< Content-Type: text/html; charset=UTF-8
|
||||||
< Transfer-Encoding: chunked
|
< Transfer-Encoding: chunked
|
||||||
< Keep-Alive: timeout=10
|
< Keep-Alive: timeout=10
|
||||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
|
||||||
<
|
<
|
||||||
<html><body>Relative Path File</body></html>
|
<html><body>Relative Path File</body></html>
|
||||||
* Connection #0 to host localhost left intact
|
* Connection #0 to host localhost left intact
|
||||||
|
@ -1389,7 +1389,7 @@ DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : nul
|
|||||||
#### schema_inference_make_columns_nullable
|
#### schema_inference_make_columns_nullable
|
||||||
|
|
||||||
Controls making inferred types `Nullable` in schema inference for formats without information about nullability.
|
Controls making inferred types `Nullable` in schema inference for formats without information about nullability.
|
||||||
If the setting is enabled, all inferred type will be `Nullable`, if disabled, the inferred type will be `Nullable` only if `input_format_null_as_default` is disabled and the column contains `NULL` in a sample that is parsed during schema inference.
|
If the setting is enabled, all inferred types will be `Nullable`; if disabled, the inferred type will never be `Nullable`; if set to `auto`, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference, or if the file metadata contains information about column nullability.
|
||||||
|
|
||||||
Enabled by default.
|
Enabled by default.
|
||||||
|
|
||||||
@ -1412,15 +1412,13 @@ DESC format(JSONEachRow, $$
|
|||||||
└─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
└─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||||
```
|
```
|
||||||
```sql
|
```sql
|
||||||
SET schema_inference_make_columns_nullable = 0;
|
SET schema_inference_make_columns_nullable = 'auto';
|
||||||
SET input_format_null_as_default = 0;
|
|
||||||
DESC format(JSONEachRow, $$
|
DESC format(JSONEachRow, $$
|
||||||
{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}
|
{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}
|
||||||
{"id" : 2, "age" : 19, "name" : "Alan", "status" : "married", "hobbies" : ["tennis", "art"]}
|
{"id" : 2, "age" : 19, "name" : "Alan", "status" : "married", "hobbies" : ["tennis", "art"]}
|
||||||
$$)
|
$$)
|
||||||
```
|
```
|
||||||
```response
|
```response
|
||||||
|
|
||||||
┌─name────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
|
┌─name────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
|
||||||
│ id │ Int64 │ │ │ │ │ │
|
│ id │ Int64 │ │ │ │ │ │
|
||||||
│ age │ Int64 │ │ │ │ │ │
|
│ age │ Int64 │ │ │ │ │ │
|
||||||
@ -1432,7 +1430,6 @@ DESC format(JSONEachRow, $$
|
|||||||
|
|
||||||
```sql
|
```sql
|
||||||
SET schema_inference_make_columns_nullable = 0;
|
SET schema_inference_make_columns_nullable = 0;
|
||||||
SET input_format_null_as_default = 1;
|
|
||||||
DESC format(JSONEachRow, $$
|
DESC format(JSONEachRow, $$
|
||||||
{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}
|
{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}
|
||||||
{"id" : 2, "age" : 19, "name" : "Alan", "status" : "married", "hobbies" : ["tennis", "art"]}
|
{"id" : 2, "age" : 19, "name" : "Alan", "status" : "married", "hobbies" : ["tennis", "art"]}
|
||||||
|
@ -73,13 +73,21 @@ In the above example the `password_sha256_hex` value is the hexadecimal represen
|
|||||||
|
|
||||||
### Storage for named collections
|
### Storage for named collections
|
||||||
|
|
||||||
Named collections can either be stored on local disk or in zookeeper/keeper. By default local storage is used.
|
Named collections can be stored either on local disk or in ZooKeeper/Keeper. By default, local storage is used.
|
||||||
|
They can also be stored using encryption with the same algorithms used for [disk encryption](storing-data#encrypted-virtual-file-system),
|
||||||
|
where `aes_128_ctr` is used by default.
|
||||||
|
|
||||||
To configure named collections storage in keeper and a `type` (equal to either `keeper` or `zookeeper`) and `path` (path in keeper, where named collections will be stored) to `named_collections_storage` section in configuration file:
|
To configure named collections storage, you need to specify a `type`. This can be either `local` or `keeper`/`zookeeper`. For encrypted storage,
|
||||||
|
you can use `local_encrypted` or `keeper_encrypted`/`zookeeper_encrypted`.
|
||||||
|
|
||||||
|
To use ZooKeeper/Keeper, we also need to add a `path` (the path in ZooKeeper/Keeper where named collections will be stored) to the
|
||||||
|
`named_collections_storage` section of the configuration file. The following example uses encryption and ZooKeeper/Keeper:
|
||||||
```
|
```
|
||||||
<clickhouse>
|
<clickhouse>
|
||||||
<named_collections_storage>
|
<named_collections_storage>
|
||||||
<type>zookeeper</type>
|
<type>zookeeper_encrypted</type>
|
||||||
|
<key_hex>bebec0cabebec0cabebec0cabebec0ca</key_hex>
|
||||||
|
<algorithm>aes_128_ctr</algorithm>
|
||||||
<path>/named_collections_path/</path>
|
<path>/named_collections_path/</path>
|
||||||
<update_timeout_ms>1000</update_timeout_ms>
|
<update_timeout_ms>1000</update_timeout_ms>
|
||||||
</named_collections_storage>
|
</named_collections_storage>
|
||||||
|
@ -155,6 +155,8 @@ SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'tag 1';
|
|||||||
SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'tag 2';
|
SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'tag 2';
|
||||||
```
|
```
|
||||||
|
|
||||||
|
To remove only entries with tag `tag` from the query cache, you can use the statement `SYSTEM DROP QUERY CACHE TAG 'tag'`.
|
||||||
|
|
||||||
ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#setting-max_block_size) rows. Due to filtering, aggregation,
|
ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#setting-max_block_size) rows. Due to filtering, aggregation,
|
||||||
etc., result blocks are typically much smaller than 'max_block_size' but there are also cases where they are much bigger. Setting
|
etc., result blocks are typically much smaller than 'max_block_size' but there are also cases where they are much bigger. Setting
|
||||||
[query_cache_squash_partial_results](settings/settings.md#query-cache-squash-partial-results) (enabled by default) controls if result blocks
|
[query_cache_squash_partial_results](settings/settings.md#query-cache-squash-partial-results) (enabled by default) controls if result blocks
|
||||||
|
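As a small illustration of the tag workflow described above (the tag value is arbitrary):

```sql
-- Cache a result under a tag, then evict only the entries carrying that tag.
SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc';
SYSTEM DROP QUERY CACHE TAG 'abc';
```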
@@ -171,8 +171,8 @@ If the `schema_inference_hints` is not formated properly, or if there is a typo

 ## schema_inference_make_columns_nullable {#schema_inference_make_columns_nullable}

-Controls making inferred types `Nullable` in schema inference for formats without information about nullability.
-If the setting is enabled, the inferred type will be `Nullable` only if column contains `NULL` in a sample that is parsed during schema inference.
+Controls making inferred types `Nullable` in schema inference.
+If the setting is enabled, all inferred types will be `Nullable`; if disabled, the inferred type will never be `Nullable`; if set to `auto`, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference or the file metadata contains information about column nullability.

 Default value: `true`.

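A rough sketch of the effect of disabling this setting (illustrative only; the sample row is made up):

```sql
-- With the setting disabled, inferred columns are never wrapped in Nullable.
SET schema_inference_make_columns_nullable = 0;
DESC format(JSONEachRow, '{"id" : 1, "name" : "Josh"}');
-- Expected: id is inferred as Int64 and name as String, without Nullable(...).
```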
@@ -194,6 +194,17 @@ If enabled, ClickHouse will try to infer type `DateTime64` from string fields in

 Enabled by default.

+## input_format_try_infer_variants {#input_format_try_infer_variants}
+
+If enabled, ClickHouse will try to infer type [`Variant`](../../sql-reference/data-types/variant.md) in schema inference for text formats when there is more than one possible type for column/array elements.
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: `0`.
+
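For the `input_format_try_infer_variants` setting added above, a minimal sketch of how inference changes (illustrative only; the sample rows are made up):

```sql
SET input_format_try_infer_variants = 1;
-- Two rows give different types for the same key; with the setting enabled the
-- column may be inferred as a Variant of Int64 and String rather than a single type.
DESC format(JSONEachRow, '{"x" : 42} {"x" : "Hello"}');
```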
 ## date_time_input_format {#date_time_input_format}

 Allows choosing a parser of the text representation of date and time.
@@ -1381,7 +1381,7 @@ Default value: `2`.

 Close connection before returning connection to the pool.

-Default value: true.
+Default value: false.

 ## odbc_bridge_connection_pool_size {#odbc-bridge-connection-pool-size}

@@ -2855,7 +2855,7 @@ The minimum chunk size in bytes, which each thread will parse in parallel.

 ## merge_selecting_sleep_ms {#merge_selecting_sleep_ms}

-Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in `background_schedule_pool` frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
+Minimum time to wait before trying to select parts to merge again after no parts were selected. A lower setting triggers selecting tasks in `background_schedule_pool` frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.

 Possible values:

@@ -2863,6 +2863,16 @@ Possible values:

 Default value: `5000`.

+## max_merge_selecting_sleep_ms
+
+Maximum time to wait before trying to select parts to merge again after no parts were selected. A lower setting triggers selecting tasks in `background_schedule_pool` frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: `60000`.
+
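As a hedged sketch of tuning the two merge-selecting settings together on an existing table (the table name is hypothetical; this assumes both are modifiable table-level MergeTree settings on a recent version):

```sql
-- Back off more aggressively when no parts are selected for merging,
-- reducing Keeper traffic on a large cluster.
ALTER TABLE my_table
    MODIFY SETTING merge_selecting_sleep_ms = 10000, max_merge_selecting_sleep_ms = 120000;
```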
 ## parallel_distributed_insert_select {#parallel_distributed_insert_select}

 Enables parallel distributed `INSERT ... SELECT` query.
@@ -5623,7 +5633,6 @@ Default value: `1GiB`.
 ## use_json_alias_for_old_object_type

 When enabled, `JSON` data type alias will be used to create an old [Object('json')](../../sql-reference/data-types/json.md) type instead of the new [JSON](../../sql-reference/data-types/newjson.md) type.
-This setting requires server restart to take effect when changed.

 Default value: `false`.

41 docs/en/operations/system-tables/projections.md Normal file
@@ -0,0 +1,41 @@
+---
+slug: /en/operations/system-tables/projections
+---
+# projections
+
+Contains information about existing projections in all the tables.
+
+Columns:
+
+- `database` ([String](../../sql-reference/data-types/string.md)) — Database name.
+- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
+- `name` ([String](../../sql-reference/data-types/string.md)) — Projection name.
+- `type` ([Enum](../../sql-reference/data-types/enum.md)) — Projection type ('Normal' = 0, 'Aggregate' = 1).
+- `sorting_key` ([Array(String)](../../sql-reference/data-types/array.md)) — Projection sorting key.
+- `query` ([String](../../sql-reference/data-types/string.md)) — Projection query.
+
+**Example**
+
+```sql
+SELECT * FROM system.projections LIMIT 2 FORMAT Vertical;
+```
+
+```text
+Row 1:
+──────
+database:    default
+table:       landing
+name:        improved_sorting_key
+type:        Normal
+sorting_key: ['user_id','date']
+query:       SELECT * ORDER BY user_id, date
+
+Row 2:
+──────
+database:    default
+table:       landing
+name:        agg_no_key
+type:        Aggregate
+sorting_key: []
+query:       SELECT count()
+```
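A small follow-up query against the columns documented in the new page (it reuses the database and table names from the page's own example, which are otherwise arbitrary):

```sql
-- List the projections defined on a single table.
SELECT name, type, sorting_key
FROM system.projections
WHERE database = 'default' AND table = 'landing';
```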
@@ -104,7 +104,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord

 **Parameters**

-- `pattern` — Pattern string. See [Pattern syntax](#sequencematch).
+- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).

 **Returned values**

@@ -113,8 +113,7 @@ Events that occur at the same second may lay in the sequence in an undefined ord

 Type: `UInt8`.

-<a name="sequence-function-pattern-syntax"></a>
-**Pattern syntax**
+#### Pattern syntax

 - `(?N)` — Matches the condition argument at position `N`. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter.

@@ -196,7 +195,7 @@ sequenceCount(pattern)(timestamp, cond1, cond2, ...)

 **Parameters**

-- `pattern` — Pattern string. See [Pattern syntax](#sequencematch).
+- `pattern` — Pattern string. See [Pattern syntax](#pattern-syntax).

 **Returned values**

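For the pattern syntax referenced in these hunks, a self-contained sketch of `sequenceMatch` (the data is synthetic, not taken from the original docs):

```sql
-- Checks whether an event with number = 1 is ever followed by one with number = 2.
SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2) AS matched
FROM (SELECT number AS time, number FROM numbers(10));
```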
@@ -70,7 +70,7 @@ SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json
 └────────────────────────────────────────────────┘
 ```

-CAST from named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later.
+CAST from `JSON`, named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later.

 ## Reading JSON paths as subcolumns

@@ -453,8 +453,8 @@ As we can see, after inserting paths `e` and `f.g` the limit was reached and we

 ### During merges of data parts in MergeTree table engines

-During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths won't be able to store all paths from source parts as subcolumns.
-In this case ClickHouse chooses what paths will remain as subcolumns after merge and what types will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contains
+During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths and won't be able to store all paths from source parts as subcolumns.
+In this case ClickHouse chooses what paths will remain as subcolumns after merge and what paths will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contain
 the largest number of non-null values and move the rarest paths to the shared data structure, but it depends on the implementation.

 Let's see an example of such merge. First, let's create a table with `JSON` column, set the limit of dynamic paths to `3` and insert values with `5` different paths:

@@ -53,29 +53,28 @@ SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY
 └─────────────────────┴───────────────────────────────┘
 ```

-Intervals with different types can’t be combined. You can’t use intervals like `4 DAY 1 HOUR`. Specify intervals in units that are smaller or equal to the smallest unit of the interval, for example, the interval `1 day and an hour` interval can be expressed as `25 HOUR` or `90000 SECOND`.
-
-You can’t perform arithmetical operations with `Interval`-type values, but you can add intervals of different types consequently to values in `Date` or `DateTime` data types. For example:
+It is also possible to use multiple intervals simultaneously:

 ``` sql
-SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR
+SELECT now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR)
 ```

 ``` text
-┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐
-│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │
-└─────────────────────┴────────────────────────────────────────────────────────┘
+┌───current_date_time─┬─plus(current_date_time, plus(toIntervalDay(4), toIntervalHour(3)))─┐
+│ 2024-08-08 18:31:39 │ 2024-08-12 21:31:39                                                 │
+└─────────────────────┴─────────────────────────────────────────────────────────────────────┘
 ```

-The following query causes an exception:
+And to compare values with different intervals:

 ``` sql
-select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR)
+SELECT toIntervalMicrosecond(3600000000) = toIntervalHour(1);
 ```

 ``` text
-Received exception from server (version 19.14.1):
-Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime..
+┌─equals(toIntervalMicrosecond(3600000000), toIntervalHour(1))─┐
+│                                                             1 │
+└───────────────────────────────────────────────────────────────┘
 ```

 ## See Also
@@ -49,7 +49,7 @@ Result:

 ## multiIf

-Allows to write the [CASE](../../sql-reference/operators/index.md#operator_case) operator more compactly in the query.
+Allows to write the [CASE](../../sql-reference/operators/index.md#conditional-expression) operator more compactly in the query.

 **Syntax**

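The `multiIf` syntax block itself follows in the unchanged part of the page; purely as an illustrative sketch of the function (not taken from the original docs):

```sql
-- multiIf(cond_1, then_1, cond_2, then_2, ..., else)
SELECT
    number,
    multiIf(number % 15 = 0, 'fizzbuzz', number % 3 = 0, 'fizz', number % 5 = 0, 'buzz', toString(number)) AS label
FROM numbers(1, 15);
```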
@@ -2019,7 +2019,7 @@ Alias: `dateTrunc`.

 `unit` argument is case-insensitive.

-- `value` — Date and time. [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
+- `value` — Date and time. [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
 - `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../data-types/string.md).

 **Returned value**
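A quick sketch of the newly documented `Date` argument (illustrative; not part of the original change):

```sql
-- Truncate a plain Date to the start of its month.
SELECT dateTrunc('month', toDate('2024-06-15')) AS truncated;
-- Expected result: 2024-06-01
```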
@@ -4287,7 +4287,7 @@ Result:

 ## fromModifiedJulianDay

-Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day number from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
+Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day number from `-678941` to `2973483` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.

 **Syntax**

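A small usage sketch (the expected output assumes that Modified Julian Day 58849 corresponds to 2020-01-01, a standard correspondence, but treat it as illustrative rather than documented output):

```sql
SELECT fromModifiedJulianDay(58849) AS date;
-- Expected: '2020-01-01'
```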
@@ -6,7 +6,7 @@ title: "Functions for Working with Geohash"

 ## Geohash

-[Geohash](https://en.wikipedia.org/wiki/Geohash) is the geocode system, which subdivides Earth’s surface into buckets of grid shape and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer is the geohash string, the more precise is the geographic location.
+[Geohash](https://en.wikipedia.org/wiki/Geohash) is the geocode system, which subdivides Earth’s surface into buckets of grid shape and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer the geohash string is, the more precise the geographic location will be.

 If you need to manually convert geographic coordinates to geohash strings, you can use [geohash.org](http://geohash.org/).

@@ -14,26 +14,37 @@ If you need to manually convert geographic coordinates to geohash strings, you c

 Encodes latitude and longitude as a [geohash](#geohash)-string.

+**Syntax**
+
 ``` sql
 geohashEncode(longitude, latitude, [precision])
 ```

 **Input values**

-- longitude - longitude part of the coordinate you want to encode. Floating in range`[-180°, 180°]`
-- latitude - latitude part of the coordinate you want to encode. Floating in range `[-90°, 90°]`
-- precision - Optional, length of the resulting encoded string, defaults to `12`. Integer in range `[1, 12]`. Any value less than `1` or greater than `12` is silently converted to `12`.
+- `longitude` — Longitude part of the coordinate you want to encode. Floating in range`[-180°, 180°]`. [Float](../../data-types/float.md).
+- `latitude` — Latitude part of the coordinate you want to encode. Floating in range `[-90°, 90°]`. [Float](../../data-types/float.md).
+- `precision` (optional) — Length of the resulting encoded string. Defaults to `12`. Integer in the range `[1, 12]`. [Int8](../../data-types/int-uint.md).

+:::note
+- All coordinate parameters must be of the same type: either `Float32` or `Float64`.
+- For the `precision` parameter, any value less than `1` or greater than `12` is silently converted to `12`.
+:::
+
 **Returned values**

-- alphanumeric `String` of encoded coordinate (modified version of the base32-encoding alphabet is used).
+- Alphanumeric string of the encoded coordinate (modified version of the base32-encoding alphabet is used). [String](../../data-types/string.md).

 **Example**

+Query:
+
 ``` sql
 SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res;
 ```

+Result:
+
 ``` text
 ┌─res──────────┐
 │ ezs42d000000 │
@@ -44,13 +55,19 @@ SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res;

 Decodes any [geohash](#geohash)-encoded string into longitude and latitude.

+**Syntax**
+
+```sql
+geohashDecode(hash_str)
+```
+
 **Input values**

-- encoded string - geohash-encoded string.
+- `hash_str` — Geohash-encoded string.

 **Returned values**

-- (longitude, latitude) - 2-tuple of `Float64` values of longitude and latitude.
+- Tuple `(longitude, latitude)` of `Float64` values of longitude and latitude. [Tuple](../../data-types/tuple.md)([Float64](../../data-types/float.md))

 **Example**

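The page's own example continues past this hunk; purely as an illustrative sketch (the expected output is approximate and should not be read as the documented result):

```sql
SELECT geohashDecode('ezs42') AS res;
-- Returns a (longitude, latitude) tuple, roughly (-5.603, 42.605) for this hash.
```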
@ -688,6 +688,40 @@ SELECT kostikConsistentHash(16045690984833335023, 2);
|
|||||||
└───────────────────────────────────────────────┘
|
└───────────────────────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## ripeMD160
|
||||||
|
|
||||||
|
Produces [RIPEMD-160](https://en.wikipedia.org/wiki/RIPEMD) hash value.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ripeMD160(input)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `input`: Input string. [String](../data-types/string.md)
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- A [UInt256](../data-types/int-uint.md) hash value where the 160-bit RIPEMD-160 hash is stored in the first 20 bytes. The remaining 12 bytes are zero-padded.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Use the [hex](../functions/encoding-functions.md/#hex) function to represent the result as a hex-encoded string.
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT hex(ripeMD160('The quick brown fox jumps over the lazy dog'));
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─hex(ripeMD160('The quick brown fox jumps over the lazy dog'))─┐
|
||||||
|
│ 37F332F68DB77BD9D7EDD4969571AD671CF9DD3B │
|
||||||
|
└───────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
## murmurHash2_32, murmurHash2_64
|
## murmurHash2_32, murmurHash2_64
|
||||||
|
|
||||||
Produces a [MurmurHash2](https://github.com/aappleby/smhasher) hash value.
|
Produces a [MurmurHash2](https://github.com/aappleby/smhasher) hash value.
|
||||||
|
@ -8,6 +8,78 @@ sidebar_label: Replacing in Strings
|
|||||||
|
|
||||||
[General strings functions](string-functions.md) and [functions for searching in strings](string-search-functions.md) are described separately.
|
[General strings functions](string-functions.md) and [functions for searching in strings](string-search-functions.md) are described separately.
|
||||||
|
|
||||||
|
## overlay
|
||||||
|
|
||||||
|
Replace part of the string `input` with another string `replace`, starting at the 1-based index `offset`.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
overlay(s, replace, offset[, length])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `input`: A string type [String](../data-types/string.md).
|
||||||
|
- `replace`: A string type [String](../data-types/string.md).
|
||||||
|
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
|
||||||
|
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of bytes removed from `input` equals the length of `replace`; otherwise `length` bytes are removed.
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- A [String](../data-types/string.md) data type value.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT overlay('ClickHouse SQL', 'CORE', 12) AS res;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─res─────────────┐
|
||||||
|
│ ClickHouse CORE │
|
||||||
|
└─────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## overlayUTF8
|
||||||
|
|
||||||
|
Replace part of the string `input` with another string `replace`, starting at the 1-based index `offset`.
|
||||||
|
|
||||||
|
Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
overlayUTF8(s, replace, offset[, length])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `s`: A string type [String](../data-types/string.md).
|
||||||
|
- `replace`: A string type [String](../data-types/string.md).
|
||||||
|
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
|
||||||
|
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of characters removed from `input` equals the length of `replace`; otherwise `length` characters are removed.
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- A [String](../data-types/string.md) data type value.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT overlayUTF8('ClickHouse是一款OLAP数据库', '开源', 12, 2) AS res;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─res────────────────────────┐
|
||||||
|
│ ClickHouse是开源OLAP数据库 │
|
||||||
|
└────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
## replaceOne
|
## replaceOne
|
||||||
|
|
||||||
Replaces the first occurrence of the substring `pattern` in `haystack` by the `replacement` string.
|
Replaces the first occurrence of the substring `pattern` in `haystack` by the `replacement` string.
|
||||||
|
@ -49,6 +49,55 @@ SETTINGS cast_keep_nullable = 1
|
|||||||
└──────────────────┴─────────────────────┴──────────────────┘
|
└──────────────────┴─────────────────────┴──────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## toBool
|
||||||
|
|
||||||
|
Converts an input value to a value of type [`Bool`](../data-types/boolean.md). Throws an exception in case of an error.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
toBool(expr)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `expr` — Expression returning a number or a string. [Expression](../syntax.md/#syntax-expressions).
|
||||||
|
|
||||||
|
Supported arguments:
|
||||||
|
- Values of type (U)Int8/16/32/64/128/256.
|
||||||
|
- Values of type Float32/64.
|
||||||
|
- Strings `true` or `false` (case-insensitive).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- Returns `true` or `false` based on evaluation of the argument. [Bool](../data-types/boolean.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
toBool(toUInt8(1)),
|
||||||
|
toBool(toInt8(-1)),
|
||||||
|
toBool(toFloat32(1.01)),
|
||||||
|
toBool('true'),
|
||||||
|
toBool('false'),
|
||||||
|
toBool('FALSE')
|
||||||
|
FORMAT Vertical
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
toBool(toUInt8(1)): true
|
||||||
|
toBool(toInt8(-1)): true
|
||||||
|
toBool(toFloat32(1.01)): true
|
||||||
|
toBool('true'): true
|
||||||
|
toBool('false'): false
|
||||||
|
toBool('FALSE'): false
|
||||||
|
```
|
||||||
|
|
||||||
## toInt8
|
## toInt8
|
||||||
|
|
||||||
Converts an input value to a value of type [`Int8`](../data-types/int-uint.md). Throws an exception in case of an error.
|
Converts an input value to a value of type [`Int8`](../data-types/int-uint.md). Throws an exception in case of an error.
|
||||||
@@ -6103,30 +6152,23 @@ Result:
 └───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘
 ```

-## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second)
+## toIntervalYear

-Converts a Number type argument to an [Interval](../data-types/special-data-types/interval.md) data type.
+Returns an interval of `n` years of data type [IntervalYear](../data-types/special-data-types/interval.md).

 **Syntax**

 ``` sql
-toIntervalSecond(number)
-toIntervalMinute(number)
-toIntervalHour(number)
-toIntervalDay(number)
-toIntervalWeek(number)
-toIntervalMonth(number)
-toIntervalQuarter(number)
-toIntervalYear(number)
+toIntervalYear(n)
 ```

 **Arguments**

-- `number` — Duration of interval. Positive integer number.
+- `n` — Number of years. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).

 **Returned values**

-- The value in `Interval` data type.
+- Interval of `n` years. [IntervalYear](../data-types/special-data-types/interval.md).

 **Example**

@ -6134,20 +6176,387 @@ Query:
|
|||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
WITH
|
WITH
|
||||||
toDate('2019-01-01') AS date,
|
toDate('2024-06-15') AS date,
|
||||||
INTERVAL 1 WEEK AS interval_week,
|
toIntervalYear(1) AS interval_to_year
|
||||||
toIntervalWeek(1) AS interval_to_week
|
SELECT date + interval_to_year AS result
|
||||||
SELECT
|
|
||||||
date + interval_week,
|
|
||||||
date + interval_to_week;
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Result:
|
Result:
|
||||||
|
|
||||||
```response
|
```response
|
||||||
┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐
|
┌─────result─┐
|
||||||
│ 2019-01-08 │ 2019-01-08 │
|
│ 2025-06-15 │
|
||||||
└───────────────────────────┴──────────────────────────────┘
|
└────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## toIntervalQuarter
|
||||||
|
|
||||||
|
Returns an interval of `n` quarters of data type [IntervalQuarter](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toIntervalQuarter(n)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `n` — Number of quarters. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Interval of `n` quarters. [IntervalQuarter](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WITH
|
||||||
|
toDate('2024-06-15') AS date,
|
||||||
|
toIntervalQuarter(1) AS interval_to_quarter
|
||||||
|
SELECT date + interval_to_quarter AS result
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─────result─┐
|
||||||
|
│ 2024-09-15 │
|
||||||
|
└────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## toIntervalMonth
|
||||||
|
|
||||||
|
Returns an interval of `n` months of data type [IntervalMonth](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toIntervalMonth(n)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `n` — Number of months. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Interval of `n` months. [IntervalMonth](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WITH
|
||||||
|
toDate('2024-06-15') AS date,
|
||||||
|
toIntervalMonth(1) AS interval_to_month
|
||||||
|
SELECT date + interval_to_month AS result
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─────result─┐
|
||||||
|
│ 2024-07-15 │
|
||||||
|
└────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## toIntervalWeek
|
||||||
|
|
||||||
|
Returns an interval of `n` weeks of data type [IntervalWeek](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toIntervalWeek(n)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `n` — Number of weeks. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Interval of `n` weeks. [IntervalWeek](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WITH
|
||||||
|
toDate('2024-06-15') AS date,
|
||||||
|
toIntervalWeek(1) AS interval_to_week
|
||||||
|
SELECT date + interval_to_week AS result
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─────result─┐
|
||||||
|
│ 2024-06-22 │
|
||||||
|
└────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## toIntervalDay
|
||||||
|
|
||||||
|
Returns an interval of `n` days of data type [IntervalDay](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toIntervalDay(n)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `n` — Number of days. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Interval of `n` days. [IntervalDay](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WITH
|
||||||
|
toDate('2024-06-15') AS date,
|
||||||
|
toIntervalDay(5) AS interval_to_days
|
||||||
|
SELECT date + interval_to_days AS result
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─────result─┐
|
||||||
|
│ 2024-06-20 │
|
||||||
|
└────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## toIntervalHour
|
||||||
|
|
||||||
|
Returns an interval of `n` hours of data type [IntervalHour](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toIntervalHour(n)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `n` — Number of hours. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Interval of `n` hours. [IntervalHour](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WITH
|
||||||
|
toDate('2024-06-15') AS date,
|
||||||
|
toIntervalHour(12) AS interval_to_hours
|
||||||
|
SELECT date + interval_to_hours AS result
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌──────────────result─┐
|
||||||
|
│ 2024-06-15 12:00:00 │
|
||||||
|
└─────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## toIntervalMinute
|
||||||
|
|
||||||
|
Returns an interval of `n` minutes of data type [IntervalMinute](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toIntervalMinute(n)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `n` — Number of minutes. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Interval of `n` minutes. [IntervalMinute](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WITH
|
||||||
|
toDate('2024-06-15') AS date,
|
||||||
|
toIntervalMinute(12) AS interval_to_minutes
|
||||||
|
SELECT date + interval_to_minutes AS result
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌──────────────result─┐
|
||||||
|
│ 2024-06-15 00:12:00 │
|
||||||
|
└─────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## toIntervalSecond
|
||||||
|
|
||||||
|
Returns an interval of `n` seconds of data type [IntervalSecond](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toIntervalSecond(n)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `n` — Number of seconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Interval of `n` seconds. [IntervalSecond](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WITH
|
||||||
|
toDate('2024-06-15') AS date,
|
||||||
|
toIntervalSecond(30) AS interval_to_seconds
|
||||||
|
SELECT date + interval_to_seconds AS result
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌──────────────result─┐
|
||||||
|
│ 2024-06-15 00:00:30 │
|
||||||
|
└─────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## toIntervalMillisecond
|
||||||
|
|
||||||
|
Returns an interval of `n` milliseconds of data type [IntervalMillisecond](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toIntervalMillisecond(n)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `n` — Number of milliseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Interval of `n` milliseconds. [IntervalMilliseconds](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WITH
|
||||||
|
toDateTime('2024-06-15') AS date,
|
||||||
|
toIntervalMillisecond(30) AS interval_to_milliseconds
|
||||||
|
SELECT date + interval_to_milliseconds AS result
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌──────────────────result─┐
|
||||||
|
│ 2024-06-15 00:00:00.030 │
|
||||||
|
└─────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## toIntervalMicrosecond
|
||||||
|
|
||||||
|
Returns an interval of `n` microseconds of data type [IntervalMicrosecond](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toIntervalMicrosecond(n)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `n` — Number of microseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Interval of `n` microseconds. [IntervalMicrosecond](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WITH
|
||||||
|
toDateTime('2024-06-15') AS date,
|
||||||
|
toIntervalMicrosecond(30) AS interval_to_microseconds
|
||||||
|
SELECT date + interval_to_microseconds AS result
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─────────────────────result─┐
|
||||||
|
│ 2024-06-15 00:00:00.000030 │
|
||||||
|
└────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## toIntervalNanosecond
|
||||||
|
|
||||||
|
Returns an interval of `n` nanoseconds of data type [IntervalNanosecond](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
toIntervalNanosecond(n)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `n` — Number of nanoseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Interval of `n` nanoseconds. [IntervalNanosecond](../data-types/special-data-types/interval.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WITH
|
||||||
|
toDateTime('2024-06-15') AS date,
|
||||||
|
toIntervalNanosecond(30) AS interval_to_nanoseconds
|
||||||
|
SELECT date + interval_to_nanoseconds AS result
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌────────────────────────result─┐
|
||||||
|
│ 2024-06-15 00:00:00.000000030 │
|
||||||
|
└───────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
## parseDateTime
|
## parseDateTime
|
||||||
|
@@ -8,7 +8,7 @@ title: "CREATE ROW POLICY"
 Creates a [row policy](../../../guides/sre/user-management/index.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.

 :::tip
-Row policies makes sense only for users with readonly access. If user can modify table or copy partitions between tables, it defeats the restrictions of row policies.
+Row policies make sense only for users with readonly access. If a user can modify a table or copy partitions between tables, it defeats the restrictions of row policies.
 :::

 Syntax:
@@ -24,40 +24,40 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste

 ## USING Clause

-Allows to specify a condition to filter rows. An user will see a row if the condition is calculated to non-zero for the row.
+Allows specifying a condition to filter rows. A user will see a row if the condition is calculated to non-zero for the row.

 ## TO Clause

-In the section `TO` you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.
+In the `TO` section you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.

-Keyword `ALL` means all the ClickHouse users including current user. Keyword `ALL EXCEPT` allow to exclude some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`
+Keyword `ALL` means all the ClickHouse users, including current user. Keyword `ALL EXCEPT` allows excluding some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`

 :::note
-If there are no row policies defined for a table then any user can `SELECT` all the row from the table. Defining one or more row policies for the table makes the access to the table depending on the row policies no matter if those row policies are defined for the current user or not. For example, the following policy
+If there are no row policies defined for a table, then any user can `SELECT` all the rows from the table. Defining one or more row policies for the table makes access to the table dependent on the row policies, no matter if those row policies are defined for the current user or not. For example, the following policy:

 `CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter`

-forbids the users `mira` and `peter` to see the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all.
+forbids the users `mira` and `peter` from seeing the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all.

-If that's not desirable it can't be fixed by adding one more row policy, like the following:
+If that's not desirable, it can be fixed by adding one more row policy, like the following:

 `CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter`
 :::

 ## AS Clause

-It's allowed to have more than one policy enabled on the same table for the same user at the one time. So we need a way to combine the conditions from multiple policies.
+It's allowed to have more than one policy enabled on the same table for the same user at one time. So we need a way to combine the conditions from multiple policies.

-By default policies are combined using the boolean `OR` operator. For example, the following policies
+By default, policies are combined using the boolean `OR` operator. For example, the following policies:

 ``` sql
 CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
 CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio
 ```

-enables the user `peter` to see rows with either `b=1` or `c=2`.
+enable the user `peter` to see rows with either `b=1` or `c=2`.

-The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive. By default policies are permissive, which means they are combined using the boolean `OR` operator.
+The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive. By default, policies are permissive, which means they are combined using the boolean `OR` operator.

 A policy can be defined as restrictive as an alternative. Restrictive policies are combined using the boolean `AND` operator.

@@ -68,25 +68,25 @@ row_is_visible = (one or more of the permissive policies' conditions are non-zer
 (all of the restrictive policies's conditions are non-zero)
 ```

-For example, the following policies
+For example, the following policies:

 ``` sql
 CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
 CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
 ```

-enables the user `peter` to see rows only if both `b=1` AND `c=2`.
+enable the user `peter` to see rows only if both `b=1` AND `c=2`.

 Database policies are combined with table policies.

-For example, the following policies
+For example, the following policies:

 ``` sql
 CREATE ROW POLICY pol1 ON mydb.* USING b=1 TO mira, peter
 CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
 ```

-enables the user `peter` to see table1 rows only if both `b=1` AND `c=2`, although
+enable the user `peter` to see table1 rows only if both `b=1` AND `c=2`, although
 any other table in mydb would have only `b=1` policy applied for the user.

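To see which policies ended up defined after experiments like the ones above, the existing policies can be listed (a sketch; it reuses the example database, table and policy names from this page):

```sql
SHOW ROW POLICIES ON mydb.table1;
-- Or query the system table directly:
SELECT name, select_filter, is_restrictive
FROM system.row_policies
WHERE database = 'mydb' AND table = 'table1';
```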
@@ -10,7 +10,7 @@ title: The Lightweight DELETE Statement
 The lightweight `DELETE` statement removes rows from the table `[db.]table` that match the expression `expr`. It is only available for the *MergeTree table engine family.

 ``` sql
-DELETE FROM [db.]table [ON CLUSTER cluster] WHERE expr;
+DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE expr;
 ```

 It is called "lightweight `DELETE`" to contrast it to the [ALTER table DELETE](/en/sql-reference/statements/alter/delete) command, which is a heavyweight process.
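A hedged sketch of the new `IN PARTITION` clause (the table and the partition value are hypothetical and depend on the table's partition key):

```sql
-- Delete matching rows only within one partition of a table partitioned by toYYYYMM(date).
DELETE FROM db.orders IN PARTITION 202406 WHERE status = 'cancelled';
```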
@@ -200,6 +200,7 @@ Hierarchy of privileges:
 - `JDBC`
 - `HDFS`
 - `S3`
+- `POSTGRES`
 - [dictGet](#dictget)
 - [displaySecretsInShowAndSelect](#displaysecretsinshowandselect)
 - [NAMED COLLECTION ADMIN](#named-collection-admin)
@@ -476,6 +477,7 @@ Allows using external data sources. Applies to [table engines](../../engines/tab
 - `JDBC`. Level: `GLOBAL`
 - `HDFS`. Level: `GLOBAL`
 - `S3`. Level: `GLOBAL`
+- `POSTGRES`. Level: `GLOBAL`

 The `SOURCES` privilege enables use of all the sources. Also you can grant a privilege for each source individually. To use sources, you need additional privileges.

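A minimal sketch of granting the newly listed source privilege (the user name is hypothetical):

```sql
GRANT POSTGRES ON *.* TO analytics_user;
```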
@@ -136,7 +136,13 @@ The compiled expression cache is enabled/disabled with the query/user/profile-le

 ## DROP QUERY CACHE

+```sql
+SYSTEM DROP QUERY CACHE;
+SYSTEM DROP QUERY CACHE TAG '<tag>'
+```
+
 Clears the [query cache](../../operations/query-cache.md).
+If a tag is specified, only query cache entries with the specified tag are deleted.

 ## DROP FORMAT SCHEMA CACHE {#system-drop-schema-format}

@@ -116,7 +116,7 @@ SELECT * from HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parque
 ## Storage Settings {#storage-settings}

 - [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
-- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
 - [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
 - [ignore_access_denied_multidirectory_globs](/docs/en/operations/settings/settings.md#ignore_access_denied_multidirectory_globs) - allows to ignore permission denied errors for multi-directory globs.

@@ -6,35 +6,37 @@ sidebar_label: iceberg

 # iceberg Table Function

-Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3.
+Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure or stored locally.

 ## Syntax

 ``` sql
-iceberg(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure])
+icebergS3(url [, NOSIGN | access_key_id, secret_access_key, [session_token]] [,format] [,compression_method])
+icebergS3(named_collection[, option=value [,..]])
+
+icebergAzure(connection_string|storage_account_url, container_name, blobpath, [,account_name], [,account_key] [,format] [,compression_method])
+icebergAzure(named_collection[, option=value [,..]])
+
+icebergLocal(path_to_table, [,format] [,compression_method])
+icebergLocal(named_collection[, option=value [,..]])
 ```

 ## Arguments

-- `url` — Bucket url with the path to an existing Iceberg table in S3.
-- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. These parameters are optional. If credentials are not specified, they are used from the ClickHouse configuration. For more information see [Using S3 for Data Storage](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3).
-- `format` — The [format](/docs/en/interfaces/formats.md/#formats) of the file. By default `Parquet` is used.
-- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
+Description of the arguments matches the description of the arguments of table functions `s3`, `azureBlobStorage` and `file`, respectively.
+`format` stands for the format of data files in the Iceberg table.

-Engine parameters can be specified using [Named Collections](/docs/en/operations/named-collections.md).

 **Returned value**
-A table with the specified structure for reading data in the specified Iceberg table in S3.
+A table with the specified structure for reading data in the specified Iceberg table.

 **Example**

 ```sql
-SELECT * FROM iceberg('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
+SELECT * FROM icebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
 ```

 :::important
-ClickHouse currently supports reading v1 (v2 support is coming soon!) of the Iceberg format via the `iceberg` table function and `Iceberg` table engine.
+ClickHouse currently supports reading v1 and v2 of the Iceberg format via the `icebergS3`, `icebergAzure` and `icebergLocal` table functions and the `IcebergS3`, `IcebergAzure` and `IcebergLocal` table engines.
 :::

 ## Defining a named collection
@ -56,10 +58,14 @@ Here is an example of configuring a named collection for storing the URL and cre
|
|||||||
```
|
```
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT * FROM iceberg(iceberg_conf, filename = 'test_table')
|
SELECT * FROM icebergS3(iceberg_conf, filename = 'test_table')
|
||||||
DESCRIBE iceberg(iceberg_conf, filename = 'test_table')
|
DESCRIBE icebergS3(iceberg_conf, filename = 'test_table')
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Aliases**
|
||||||
|
|
||||||
|
Table function `iceberg` is an alias to `icebergS3` now.
|
||||||
|
|
||||||
**See Also**
|
**See Also**
|
||||||
|
|
||||||
- [Iceberg engine](/docs/en/engines/table-engines/integrations/iceberg.md)
|
- [Iceberg engine](/docs/en/engines/table-engines/integrations/iceberg.md)
|
||||||
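Editor's note: to make the split syntax introduced in this hunk concrete, here is a minimal sketch of the two most common call shapes. It assumes the illustrative bucket URL used on this page and the `iceberg_conf` named collection defined further down; neither is guaranteed to exist outside the documentation.

```sql
-- Anonymous (NOSIGN) read from a public bucket; the URL is a placeholder from the docs.
SELECT count()
FROM icebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', NOSIGN);

-- The same table accessed through the named collection shown in the docs example.
SELECT *
FROM icebergS3(iceberg_conf, filename = 'test_table')
LIMIT 10;
```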
@@ -290,7 +290,7 @@ SELECT * from s3('s3://data/path/date=*/country=*/code=*/*.parquet') where _date
 ## Storage Settings {#storage-settings}

 - [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
-- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
+- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default.
 - [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.

 **See Also**
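Editor's note: a short sketch of how the renamed setting above is typically applied with the `s3` table function. The bucket URL, credentials and column definition are placeholders for illustration only.

```sql
-- With the setting enabled, each INSERT writes a new object (with a numeric suffix
-- inserted before the extension) instead of failing on an existing key.
INSERT INTO FUNCTION s3('https://my-bucket.s3.amazonaws.com/data.csv', 'KEY', 'SECRET', 'CSV', 'x UInt32')
SETTINGS s3_create_new_file_on_insert = 1
VALUES (1), (2), (3);
```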
@@ -54,6 +54,7 @@ Character `|` inside patterns is used to specify failover addresses. They are it
 - `_file` — Resource name of the `URL`. Type: `LowCardinalty(String)`.
 - `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
 - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
+- `_headers` - HTTP response headers. Type: `Map(LowCardinality(String), LowCardinality(String))`.

 ## Hive-style partitioning {#hive-style-partitioning}

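Editor's note: a quick sketch of reading the new `_headers` virtual column added above. The endpoint is a placeholder, and which header names come back depends entirely on the remote server.

```sql
-- Inspect selected response headers next to the data; the URL is illustrative.
SELECT
    _headers['Content-Type'] AS content_type,
    *
FROM url('http://example.com/data.csv', 'CSVWithNames')
LIMIT 5;
```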
@@ -22,18 +22,26 @@ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not su

 ### From deb packages {#install-from-deb-packages}

-Yandex recommends using the official precompiled `deb` packages for Debian or Ubuntu. To install the packages, run:
+It is recommended to use the official precompiled `deb` packages for Debian or Ubuntu. To install the packages, run:

 ``` bash
-sudo apt-get install -y apt-transport-https ca-certificates dirmngr
+sudo apt-get install -y apt-transport-https ca-certificates curl gnupg
-sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754
+curl -fsSL 'https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key' | sudo gpg --dearmor -o /usr/share/keyrings/clickhouse-keyring.gpg

-echo "deb https://packages.clickhouse.com/deb stable main" | sudo tee \
+echo "deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb stable main" | sudo tee \
     /etc/apt/sources.list.d/clickhouse.list
 sudo apt-get update
+```

+#### Installing ClickHouse server and client
+
+```bash
 sudo apt-get install -y clickhouse-server clickhouse-client
+```

+#### Starting ClickHouse server
+
+```bash
 sudo service clickhouse-server start
 clickhouse-client # or "clickhouse-client --password" if you've set up a password.
 ```
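Editor's note: once the client connects as shown above, any trivial query serves as a smoke test; the statements below are only an illustration.

```sql
-- Run inside clickhouse-client after the server has started.
SELECT version();
SELECT 'ClickHouse is up' AS status;
```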
@@ -55,7 +63,7 @@ clickhouse-client # or "clickhouse-client --password" if you've set up a passwor
 :::
 ### From rpm packages {#from-rpm-packages}

-The ClickHouse team at Yandex recommends using the official precompiled `rpm` packages for CentOS, RedHat, and all other rpm-based Linux distributions.
+The ClickHouse team recommends using the official precompiled `rpm` packages for CentOS, RedHat, and all other rpm-based Linux distributions.

 #### Installing the official repository

@@ -102,7 +110,7 @@ sudo yum install clickhouse-server clickhouse-client

 ### From tgz archives {#from-tgz-archives}

-The ClickHouse team at Yandex recommends using precompiled binaries from `tgz` archives for all distributions where installation of `deb` and `rpm` packages is not possible.
+The ClickHouse team recommends using precompiled binaries from `tgz` archives for all distributions where installation of `deb` and `rpm` packages is not possible.

 The required version of the archives can be downloaded manually with `curl` or `wget` from the repository https://packages.clickhouse.com/tgz/.
 After that, the archives need to be unpacked and the installation scripts used. Example of installing the latest version:
@@ -50,7 +50,7 @@ Connection: Close
 Content-Type: text/tab-separated-values; charset=UTF-8
 X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
 X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
-X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
+X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}

 1
 ```

@@ -367,7 +367,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
 < X-ClickHouse-Format: Template
 < X-ClickHouse-Timezone: Asia/Shanghai
 < Keep-Alive: timeout=10
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0", "elapsed_ns":"662334", "real_time_microseconds":"0"}
 <
 # HELP "Query" "Number of executing queries"
 # TYPE "Query" counter

@@ -601,7 +601,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
 < Content-Type: text/plain; charset=UTF-8
 < Transfer-Encoding: chunked
 < Keep-Alive: timeout=10
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
 <
 * Connection #0 to host localhost left intact
 <html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%

@@ -659,7 +659,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
 < Content-Type: text/html; charset=UTF-8
 < Transfer-Encoding: chunked
 < Keep-Alive: timeout=10
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
 <
 <html><body>Absolute Path File</body></html>
 * Connection #0 to host localhost left intact

@@ -678,7 +678,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
 < Content-Type: text/html; charset=UTF-8
 < Transfer-Encoding: chunked
 < Keep-Alive: timeout=10
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
 <
 <html><body>Relative Path File</body></html>
 * Connection #0 to host localhost left intact
@@ -54,29 +54,28 @@ SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY
 └─────────────────────┴───────────────────────────────┘
 ```

-Intervals of different types cannot be combined. You cannot use intervals like `4 DAY 1 HOUR`. Instead, express the interval in units smaller than or equal to the smallest unit of the interval, for example, the interval "1 day and 1 hour" can be expressed as `25 HOUR` or `90000 SECOND`.
+You can also use different interval types at the same time:

-Arithmetic operations on values of `Interval` types are not available, but you can successively add intervals of different types to values of the `Date` and `DateTime` types. For example:
-
 ``` sql
-SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR
+SELECT now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR)
 ```

 ``` text
-┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐
+┌───current_date_time─┬─plus(current_date_time, plus(toIntervalDay(4), toIntervalHour(3)))─┐
-│ 2019-10-23 11:16:28 │                                     2019-10-27 14:16:28 │
+│ 2024-08-08 18:31:39 │                                                 2024-08-12 21:31:39 │
-└─────────────────────┴────────────────────────────────────────────────────────┘
+└─────────────────────┴────────────────────────────────────────────────────────────────────┘
 ```

-The following query causes an exception:
+And you can compare values with different intervals:

 ``` sql
-select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR)
+SELECT toIntervalMicrosecond(3600000000) = toIntervalHour(1);
 ```

 ``` text
-Received exception from server (version 19.14.1):
+┌─less(toIntervalMicrosecond(179999999), toIntervalMinute(3))─┐
-Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime..
+│                                                           1 │
+└─────────────────────────────────────────────────────────────┘
 ```

 ## See Also {#smotrite-takzhe}
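Editor's note: a small sketch combining the two behaviours documented in this hunk; the result values will differ on another machine because `now()` is evaluated at query time.

```sql
-- Mixing interval types in one expression and comparing intervals directly.
SELECT
    now() AS current_date_time,
    current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR) AS shifted,
    toIntervalMicrosecond(3600000000) = toIntervalHour(1) AS one_hour_equal;
```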
@@ -124,6 +124,40 @@ SELECT hex(sipHash128('foo', '\x01', 3));
 └──────────────────────────────────┘
 ```

+## ripeMD160
+
+Produces the [RIPEMD-160](https://en.wikipedia.org/wiki/RIPEMD) hash of a string.
+
+**Syntax**
+
+```sql
+ripeMD160(input)
+```
+
+**Arguments**
+
+- `input`: Input string. [String](../data-types/string.md)
+
+**Returned value**
+
+- A [UInt256](../data-types/int-uint.md) in which the 160-bit RIPEMD-160 hash is stored in the first 20 bytes. The remaining 12 bytes are filled with zeros.
+
+**Example**
+
+Use the [hex](../functions/encoding-functions.md#hex) function to represent the result as a hex-encoded string.
+
+Query:
+
+```sql
+SELECT hex(ripeMD160('The quick brown fox jumps over the lazy dog'));
+```
+Result:
+```response
+┌─hex(ripeMD160('The quick brown fox jumps over the lazy dog'))─┐
+│ 37F332F68DB77BD9D7EDD4969571AD671CF9DD3B                       │
+└───────────────────────────────────────────────────────────────┘
+```
+
 ## cityHash64 {#cityhash64}

 Produces a 64-bit [CityHash](https://github.com/google/cityhash) hash value.
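Editor's note: as one more illustration of the function documented above, a sketch hashing several values at once rather than a single literal; the input strings are made up for the example.

```sql
-- hex() renders the UInt256 result (first 20 bytes carry the RIPEMD-160 digest).
SELECT word, hex(ripeMD160(word)) AS digest
FROM (SELECT arrayJoin(['alice', 'bob', 'carol']) AS word);
```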
@@ -280,7 +280,7 @@ SYSTEM START REPLICATION QUEUES [ON CLUSTER cluster_name] [[db.]replicated_merge
 Waits until a table of the `ReplicatedMergeTree` family is synchronized with the other replicas in the cluster, but for no more than `receive_timeout` seconds:

 ``` sql
-SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT [FROM 'srcReplica1'[, 'srcReplica2'[, ...]]] | PULL]
+SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT [FROM 'srcReplica1'[, 'srcReplica2'[, ...]]] | PULL]
 ```

 After this query is executed, the `[db.]replicated_merge_tree_family_table_name` table fetches commands from the common replicated log into its own replication queue, and then the query waits until the replica processes all of the fetched commands. The following modifiers are supported:
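Editor's note: a quick sketch of the extended syntax from this hunk; `my_cluster` and `db.events` are placeholder names, not objects from the source.

```sql
-- Wait for the replica of the (hypothetical) table to catch up on every host of the cluster.
SYSTEM SYNC REPLICA ON CLUSTER my_cluster db.events LIGHTWEIGHT;
```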
@@ -53,7 +53,7 @@ Connection: Close
 Content-Type: text/tab-separated-values; charset=UTF-8
 X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
 X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
-X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
+X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}

 1
 ```

@@ -363,7 +363,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
 < X-ClickHouse-Format: Template
 < X-ClickHouse-Timezone: Asia/Shanghai
 < Keep-Alive: timeout=10
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
 <
 # HELP "Query" "Number of executing queries"
 # TYPE "Query" counter

@@ -524,7 +524,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
 < Content-Type: text/html; charset=UTF-8
 < Transfer-Encoding: chunked
 < Keep-Alive: timeout=10
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334", "real_time_microseconds":"0"}
 <
 * Connection #0 to host localhost left intact
 Say Hi!%

@@ -564,7 +564,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
 < Content-Type: text/plain; charset=UTF-8
 < Transfer-Encoding: chunked
 < Keep-Alive: timeout=10
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
 <
 * Connection #0 to host localhost left intact
 <html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%

@@ -616,7 +616,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
 < Content-Type: text/html; charset=UTF-8
 < Transfer-Encoding: chunked
 < Keep-Alive: timeout=10
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
 <
 <html><body>Absolute Path File</body></html>
 * Connection #0 to host localhost left intact

@@ -635,7 +635,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
 < Content-Type: text/html; charset=UTF-8
 < Transfer-Encoding: chunked
 < Keep-Alive: timeout=10
-< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334","real_time_microseconds":"0"}
 <
 <html><body>Relative Path File</body></html>
 * Connection #0 to host localhost left intact
@@ -1157,7 +1157,7 @@ SELECT toModifiedJulianDayOrNull('2020-01-01');

 ## fromModifiedJulianDay {#frommodifiedjulianday}

-Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a date in `YYYY-MM-DD` text form of the [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar). The function supports day numbers from `-678941` to `2973119` (representing 0000-01-01 and 9999-12-31, respectively). It raises an exception if the day number is outside of the supported range.
+Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a date in `YYYY-MM-DD` text form of the [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar). The function supports day numbers from `-678941` to `2973483` (representing 0000-01-01 and 9999-12-31, respectively). It raises an exception if the day number is outside of the supported range.

 **Syntax**

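Editor's note: a minimal sketch of the corrected range boundary from this hunk; per the documentation, the bounds map to 0000-01-01 and 9999-12-31.

```sql
-- Check both ends of the supported range of Modified Julian Day numbers.
SELECT
    fromModifiedJulianDay(-678941) AS first_supported_day,
    fromModifiedJulianDay(2973483) AS last_supported_day;
```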
@@ -6,6 +6,7 @@ macro(configure_bash_completion)
            COMMAND ${PKG_CONFIG_BIN} --variable=completionsdir bash-completion
            OUTPUT_VARIABLE ${out}
            OUTPUT_STRIP_TRAILING_WHITESPACE
+            COMMAND_ERROR_IS_FATAL ANY
        )
    endif()
    string(REPLACE /usr "${CMAKE_INSTALL_PREFIX}" out "${out}")
@@ -1164,9 +1164,6 @@ void Client::processOptions(const OptionsDescription & options_description,
     /// (There is no need to copy the context because clickhouse-client has no background tasks so it won't use that context in parallel.)
     client_context = global_context;
     initClientContext();
-
-    /// Allow to pass-through unknown settings to the server.
-    client_context->getAccessControl().allowAllSettings();
 }


@@ -53,6 +53,9 @@
     </proto_caps>
     -->

+    <!-- Do not send clickhouse-client to background on C-z -->
+    <!-- <ignore_shell_suspend>true</ignore_shell_suspend> -->
+
     <!--
         Settings adjustable via command-line parameters
         can take their defaults from that config file, see examples:
@@ -247,6 +247,7 @@ void DisksApp::runInteractiveReplxx()
         suggest,
         history_file,
         /* multiline= */ false,
+        /* ignore_shell_suspend= */ false,
         query_extenders,
         query_delimiters,
         word_break_characters.c_str(),
@@ -2,7 +2,7 @@

 #include <unordered_map>
 #include <vector>
-#include <Client/ReplxxLineReader.h>
+#include <Client/LineReader.h>
 #include <Loggers/Loggers.h>
 #include "DisksClient.h"
 #include "ICommand_fwd.h"
@@ -1,6 +1,5 @@
 #include "DisksClient.h"
 #include <Client/ClientBase.h>
-#include <Client/ReplxxLineReader.h>
 #include <Disks/registerDisks.h>
 #include <Common/Config/ConfigProcessor.h>

@@ -5,9 +5,8 @@
 #include <string>
 #include <unordered_map>
 #include <vector>
-#include <Client/ReplxxLineReader.h>
 #include <Loggers/Loggers.h>
-#include "Disks/IDisk.h"
+#include <Disks/IDisk.h>

 #include <Interpreters/Context.h>
 #include <boost/program_options/options_description.hpp>
@@ -677,4 +677,122 @@ void GetAllChildrenNumberCommand::execute(const ASTKeeperQuery * query, KeeperCl
     std::cout << totalNumChildren << "\n";
 }

+namespace
+{
+
+class CPMVOperation
+{
+    constexpr static UInt64 kTryLimit = 1000;
+
+public:
+    CPMVOperation(String src_, String dest_, bool remove_src_, KeeperClient * client_)
+        : src(std::move(src_)), dest(std::move(dest_)), remove_src(remove_src_), client(client_)
+    {
+    }
+
+    bool isTryLimitReached() const { return failed_tries_count >= kTryLimit; }
+
+    bool isCompleted() const { return is_completed; }
+
+    void perform()
+    {
+        Coordination::Stat src_stat;
+        String data = client->zookeeper->get(src, &src_stat);
+
+        Coordination::Requests ops{
+            zkutil::makeCheckRequest(src, src_stat.version),
+            zkutil::makeCreateRequest(dest, data, zkutil::CreateMode::Persistent), // Do we need to copy ACLs here?
+        };
+
+        if (remove_src)
+            ops.push_back(zkutil::makeRemoveRequest(src, src_stat.version));
+
+        Coordination::Responses responses;
+        auto code = client->zookeeper->tryMulti(ops, responses);
+
+        switch (code)
+        {
+            case Coordination::Error::ZOK: {
+                is_completed = true;
+                return;
+            }
+            case Coordination::Error::ZBADVERSION: {
+                ++failed_tries_count;
+
+                if (isTryLimitReached())
+                    zkutil::KeeperMultiException::check(code, ops, responses);
+
+                return;
+            }
+            default:
+                zkutil::KeeperMultiException::check(code, ops, responses);
+        }
+
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable");
+    }
+
+private:
+    String src;
+    String dest;
+    bool remove_src = false;
+    KeeperClient * client = nullptr;
+
+    bool is_completed = false;
+    uint64_t failed_tries_count = 0;
+};
+
+}
+
+bool CPCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, [[maybe_unused]] Expected & expected) const
+{
+    String src_path;
+    if (!parseKeeperPath(pos, expected, src_path))
+        return false;
+    node->args.push_back(std::move(src_path));
+
+    String to_path;
+    if (!parseKeeperPath(pos, expected, to_path))
+        return false;
+    node->args.push_back(std::move(to_path));
+
+    return true;
+}
+
+void CPCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
+{
+    auto src = client->getAbsolutePath(query->args[0].safeGet<String>());
+    auto dest = client->getAbsolutePath(query->args[1].safeGet<String>());
+
+    CPMVOperation operation(std::move(src), std::move(dest), /*remove_src_=*/false, /*client_=*/client);
+
+    while (!operation.isTryLimitReached() && !operation.isCompleted())
+        operation.perform();
+}
+
+bool MVCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const
+{
+    String src_path;
+    if (!parseKeeperPath(pos, expected, src_path))
+        return false;
+    node->args.push_back(std::move(src_path));
+
+    String to_path;
+    if (!parseKeeperPath(pos, expected, to_path))
+        return false;
+    node->args.push_back(std::move(to_path));
+
+    return true;
+}
+
+void MVCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
+{
+    auto src = client->getAbsolutePath(query->args[0].safeGet<String>());
+    auto dest = client->getAbsolutePath(query->args[1].safeGet<String>());
+
+    CPMVOperation operation(std::move(src), std::move(dest), /*remove_src_=*/true, /*client_=*/client);
+
+    while (!operation.isTryLimitReached() && !operation.isCompleted())
+        operation.perform();
+}
+
 }
@@ -266,4 +266,32 @@ class GetAllChildrenNumberCommand : public IKeeperClientCommand
     }
 };

+class CPCommand : public IKeeperClientCommand
+{
+    String getName() const override { return "cp"; }
+
+    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
+
+    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
+
+    String getHelpMessage() const override
+    {
+        return "{} <src> <dest> -- Copies 'src' node to 'dest' path.";
+    }
+};
+
+class MVCommand : public IKeeperClientCommand
+{
+    String getName() const override { return "mv"; }
+
+    bool parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, Expected & expected) const override;
+
+    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;
+
+    String getHelpMessage() const override
+    {
+        return "{} <src> <dest> -- Moves 'src' node to the 'dest' path.";
+    }
+};
+
 }
@@ -212,6 +212,8 @@ void KeeperClient::initialize(Poco::Util::Application & /* self */)
         std::make_shared<FourLetterWordCommand>(),
         std::make_shared<GetDirectChildrenNumberCommand>(),
         std::make_shared<GetAllChildrenNumberCommand>(),
+        std::make_shared<CPCommand>(),
+        std::make_shared<MVCommand>(),
     });

     String home_path;

@@ -314,6 +316,7 @@ void KeeperClient::runInteractiveReplxx()
         suggest,
         history_file,
         /* multiline= */ false,
+        /* ignore_shell_suspend= */ false,
         query_extenders,
         query_delimiters,
         word_break_characters,
@@ -713,7 +713,7 @@ void LocalServer::processConfig()
     if (index_uncompressed_cache_size > max_cache_size)
     {
         index_uncompressed_cache_size = max_cache_size;
-        LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+        LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_uncompressed_cache_size));
     }
     global_context->setIndexUncompressedCache(index_uncompressed_cache_policy, index_uncompressed_cache_size, index_uncompressed_cache_size_ratio);


@@ -723,7 +723,7 @@ void LocalServer::processConfig()
     if (index_mark_cache_size > max_cache_size)
     {
         index_mark_cache_size = max_cache_size;
-        LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+        LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(index_mark_cache_size));
     }
     global_context->setIndexMarkCache(index_mark_cache_policy, index_mark_cache_size, index_mark_cache_size_ratio);


@@ -731,7 +731,7 @@ void LocalServer::processConfig()
     if (mmap_cache_size > max_cache_size)
     {
         mmap_cache_size = max_cache_size;
-        LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
+        LOG_INFO(log, "Lowered mmap file cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mmap_cache_size));
     }
     global_context->setMMappedFileCache(mmap_cache_size);

Some files were not shown because too many files have changed in this diff.