Merge remote-tracking branch 'origin/master' into pr-skip-index-analysis-on-workers

This commit is contained in:
Igor Nikonov 2024-11-22 23:02:33 +00:00
commit d146c76745
275 changed files with 1715 additions and 853 deletions

View File

@@ -16,6 +16,9 @@ Checks: [
'-android-*',
'-boost-use-ranges',
'-modernize-use-ranges',
'-bugprone-assignment-in-if-condition',
'-bugprone-branch-clone',
'-bugprone-easily-swappable-parameters',
@@ -28,7 +31,6 @@ Checks: [
'-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
'-bugprone-unchecked-optional-access',
'-bugprone-crtp-constructor-accessibility',
'-bugprone-suspicious-stringview-data-usage',
'-cert-dcl16-c',
'-cert-dcl37-c',
@@ -42,6 +44,8 @@ Checks: [
'-clang-analyzer-optin.performance.Padding',
'-clang-analyzer-cplusplus.PlacementNew',
'-clang-analyzer-unix.Malloc',
'-cppcoreguidelines-*', # impractical in a codebase as large as ClickHouse, also slow
@@ -90,6 +94,7 @@ Checks: [
'-misc-non-private-member-variables-in-classes',
'-misc-confusable-identifiers', # useful but slooow
'-misc-use-anonymous-namespace',
'-misc-use-internal-linkage',
'-modernize-avoid-c-arrays',
'-modernize-concat-nested-namespaces',
@@ -137,6 +142,7 @@ Checks: [
'-readability-suspicious-call-argument',
'-readability-uppercase-literal-suffix',
'-readability-use-anyofallof',
'-readability-math-missing-parentheses',
'-zircon-*'
]
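A quick local sanity check of the resulting check set; a minimal sketch, assuming `clang-tidy-19` is installed, that you run from the repository root, and that a compilation database exists in `build/` (the source file is only an example):

```bash
# Show which checks remain enabled once the suppressions above are applied;
# clang-tidy discovers the repository's .clang-tidy by walking up from the file.
clang-tidy-19 --list-checks | head -n 40

# Analyze a single translation unit against the compile database in ./build.
clang-tidy-19 -p build src/Common/Exception.cpp
```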

View File

@@ -12,7 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Critical Bug Fix (crash, data loss, RBAC)
- Critical Bug Fix (crash, data loss, RBAC) or LOGICAL_ERROR
- Bug Fix (user-visible misbehavior in an official stable release)
- CI Fix or Improvement (changelog entry is not required)
- Not for changelog (changelog entry is not required)

View File

@@ -25,6 +25,11 @@ env:
required: false
default: false
type: boolean
only-docker:
description: 'Run only docker builds (repo-recovery, tests)'
required: false
default: false
type: boolean
dry-run:
description: 'Dry run'
required: false
@@ -45,6 +50,11 @@ env:
required: false
default: false
type: boolean
only-docker:
description: 'Run only docker builds (repo-recovery, tests)'
required: false
default: false
type: boolean
dry-run:
description: 'Dry run'
required: false
@@ -69,13 +79,13 @@ jobs:
- name: Prepare Release Info
shell: bash
run: |
if [ ${{ inputs.only-repo }} == "true" ]; then
git tag -l ${{ inputs.ref }} || { echo "With only-repo option ref must be a valid release tag"; exit 1; }
if [ ${{ inputs.only-repo }} == "true" ] || [ ${{ inputs.only-docker }} == "true" ]; then
git tag -l ${{ inputs.ref }} || { echo "With only-repo/docker option ref must be a valid release tag"; exit 1; }
fi
python3 ./tests/ci/create_release.py --prepare-release-info \
--ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
${{ inputs.dry-run == true && '--dry-run' || '' }} \
${{ inputs.only-repo == true && '--skip-tag-check' || '' }}
${{ (inputs.only-repo == true || inputs.only-docker == true) && '--skip-tag-check' || '' }}
echo "::group::Release Info"
python3 -m json.tool /tmp/release_info.json
echo "::endgroup::"
@@ -87,31 +97,33 @@ jobs:
echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
if [ "$is_latest" == "true" ]; then
echo "DOCKER_TAG_TYPE=release-latest" >> "$GITHUB_ENV"
echo "IS_LATEST=1" >> "$GITHUB_ENV"
else
echo "DOCKER_TAG_TYPE=release" >> "$GITHUB_ENV"
echo "IS_LATEST=0" >> "$GITHUB_ENV"
fi
- name: Download All Release Artifacts
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Push Git Tag for the Release
if: ${{ ! inputs.only-repo }}
if: ${{ ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Push New Release Branch
if: ${{ inputs.type == 'new' && ! inputs.only-repo }}
if: ${{ inputs.type == 'new' && ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Bump CH Version and Update Contributors' List
if: ${{ ! inputs.only-repo }}
if: ${{ ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Bump Docker versions, Changelog, Security
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
if: ${{ inputs.type == 'patch' && ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
@@ -135,7 +147,7 @@ jobs:
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create ChangeLog PR
if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo }}
if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo && ! inputs.only-docker }}
uses: peter-evans/create-pull-request@v6
with:
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
@@ -153,65 +165,176 @@ jobs:
### Changelog category (leave one):
- Not for changelog (changelog entry is not required)
- name: Complete previous steps and Restore git state
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
if: ${{ inputs.type == 'patch' && ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
git reset --hard HEAD
git checkout "$GITHUB_REF_NAME"
python3 ./tests/ci/create_release.py --set-progress-completed
- name: Create GH Release
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
if: ${{ inputs.type == 'patch' && ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Export TGZ Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Test TGZ Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Export RPM Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Test RPM Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Export Debian Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Test Debian Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Docker clickhouse/clickhouse-server building
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && inputs.dry-run != true }}
shell: bash
run: |
cd "./tests/ci"
python3 ./create_release.py --set-progress-started --progress "docker server release"
export CHECK_NAME="Docker server image"
python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
export DOCKER_IMAGE="clickhouse/clickhouse-server"
# We must use the Dockerfile from the release commit
git checkout "${{ env.RELEASE_TAG }}"
python3 ./version_helper.py --export > /tmp/version.sh
. /tmp/version.sh
if [[ $CLICKHOUSE_VERSION_STRING =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "ClickHouse version: $CLICKHOUSE_VERSION_STRING"
else
echo "Invalid version string: $CLICKHOUSE_VERSION_STRING"
exit 1
fi
CLICKHOUSE_VERSION_MINOR=${CLICKHOUSE_VERSION_STRING%.*}
CLICKHOUSE_VERSION_MAJOR=${CLICKHOUSE_VERSION_MINOR%.*}
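# Hypothetical walk-through of the two expansions above: with
# CLICKHOUSE_VERSION_STRING=24.11.1.1, "${CLICKHOUSE_VERSION_STRING%.*}" strips
# the shortest trailing ".*" giving 24.11.1, and stripping once more gives
# 24.11, so each image is tagged 24.11.1.1, 24.11.1, and 24.11.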
# Define build configurations
configs=(
"ubuntu:../../docker/server/Dockerfile.ubuntu"
"alpine:../../docker/server/Dockerfile.alpine"
)
for config in "${configs[@]}"; do
# Split the config into variant and Dockerfile path
variant=${config%%:*}
dockerfile=${config##*:}
VERSION_SUFFIX=$([ "$variant" = "ubuntu" ] && echo "" || echo "-$variant")
LABEL_VERSION="${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}"
TAGS=(
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}"
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MINOR}${VERSION_SUFFIX}"
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MAJOR}${VERSION_SUFFIX}"
)
if [ "$IS_LATEST" = "1" ]; then
TAGS+=("--tag=${DOCKER_IMAGE}:latest${VERSION_SUFFIX}")
fi
echo "Following tags will be created: ${TAGS[*]}"
# shellcheck disable=SC2086,SC2048
docker buildx build \
--platform=linux/amd64,linux/arm64 \
--output=type=registry \
--label=com.clickhouse.build.version="$LABEL_VERSION" \
${TAGS[*]} \
--build-arg=VERSION="$CLICKHOUSE_VERSION_STRING" \
--progress=plain \
--file="$dockerfile" \
../../docker/server
done
git checkout -
python3 ./create_release.py --set-progress-completed
- name: Docker clickhouse/clickhouse-keeper building
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && inputs.dry-run != true }}
shell: bash
run: |
cd "./tests/ci"
python3 ./create_release.py --set-progress-started --progress "docker keeper release"
export CHECK_NAME="Docker keeper image"
python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
export DOCKER_IMAGE="clickhouse/clickhouse-keeper"
# We must use the Dockerfile from the release commit
git checkout "${{ env.RELEASE_TAG }}"
python3 ./version_helper.py --export > /tmp/version.sh
. /tmp/version.sh
if [[ $CLICKHOUSE_VERSION_STRING =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "ClickHouse version: $CLICKHOUSE_VERSION_STRING"
else
echo "Invalid version string: $CLICKHOUSE_VERSION_STRING"
exit 1
fi
CLICKHOUSE_VERSION_MINOR=${CLICKHOUSE_VERSION_STRING%.*}
CLICKHOUSE_VERSION_MAJOR=${CLICKHOUSE_VERSION_MINOR%.*}
# Define build configurations
configs=(
"ubuntu:../../docker/keeper/Dockerfile.ubuntu"
"alpine:../../docker/keeper/Dockerfile.alpine"
)
for config in "${configs[@]}"; do
# Split the config into variant and Dockerfile path
variant=${config%%:*}
dockerfile=${config##*:}
VERSION_SUFFIX=$([ "$variant" = "ubuntu" ] && echo "" || echo "-$variant")
LABEL_VERSION="${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}"
TAGS=(
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}"
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MINOR}${VERSION_SUFFIX}"
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MAJOR}${VERSION_SUFFIX}"
)
if [ "$IS_LATEST" = "1" ]; then
TAGS+=("--tag=${DOCKER_IMAGE}:latest${VERSION_SUFFIX}")
fi
echo "Following tags will be created: ${TAGS[*]}"
# shellcheck disable=SC2086,SC2048
docker buildx build \
--platform=linux/amd64,linux/arm64 \
--output=type=registry \
--label=com.clickhouse.build.version="$LABEL_VERSION" \
${TAGS[*]} \
--build-arg=VERSION="$CLICKHOUSE_VERSION_STRING" \
--progress=plain \
--file="$dockerfile" \
../../docker/keeper
done
git checkout -
python3 ./create_release.py --set-progress-completed
# check out back if previous steps failed
- name: Checkout back
if: ${{ ! cancelled() }}
shell: bash
run: |
git checkout ${{ github.ref }}
- name: Update release info. Merge created PRs
shell: bash
run: |

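For reference, a docker-only re-run for an existing release tag could be dispatched like this. A sketch only: the workflow file name and the tag value are assumptions, while the input names (`ref`, `type`, `only-docker`, `dry-run`) match the inputs defined above.

```bash
# Rebuild and push only the Docker images for an already-released tag.
gh workflow run create_release.yml \
  --field ref=v24.10.3.21-stable \
  --field type=patch \
  --field only-docker=true \
  --field dry-run=false
```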
View File

@@ -1,4 +1,5 @@
### Table of Contents
**[ClickHouse release v24.11, 2024-11-26](#2411)**<br/>
**[ClickHouse release v24.10, 2024-10-31](#2410)**<br/>
**[ClickHouse release v24.9, 2024-09-26](#249)**<br/>
**[ClickHouse release v24.8 LTS, 2024-08-20](#248)**<br/>
@@ -13,6 +14,95 @@
# 2024 Changelog
### <a id="2411"></a> ClickHouse release 24.11, 2024-11-26
#### Backward Incompatible Change
* Remove system tables `generate_series` and `generateSeries`. They were added by mistake here: [#59390](https://github.com/ClickHouse/ClickHouse/issues/59390). [#71091](https://github.com/ClickHouse/ClickHouse/pull/71091) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove `StorageExternalDistributed`. Closes [#70600](https://github.com/ClickHouse/ClickHouse/issues/70600). [#71176](https://github.com/ClickHouse/ClickHouse/pull/71176) ([flynn](https://github.com/ucasfl)).
* The table engines Kafka, NATS and RabbitMQ are now covered by their own grants in the `SOURCES` hierarchy. Add grants to any non-default database users that create tables with these engine types. [#71250](https://github.com/ClickHouse/ClickHouse/pull/71250) ([Christoph Wurm](https://github.com/cwurm)).
* Check the full mutation query before executing it (including subqueries). This prevents accidentally running an invalid query and building up dead mutations that block valid mutations. [#71300](https://github.com/ClickHouse/ClickHouse/pull/71300) ([Christoph Wurm](https://github.com/cwurm)).
* Rename filesystem cache setting `skip_download_if_exceeds_query_cache` to `filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit`. [#71578](https://github.com/ClickHouse/ClickHouse/pull/71578) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove support for `Enum` as well as `UInt128` and `UInt256` arguments in `deltaSumTimestamp`. Remove support for `Int8`, `UInt8`, `Int16`, and `UInt16` of the second ("timestamp") argument of `deltaSumTimestamp`. [#71790](https://github.com/ClickHouse/ClickHouse/pull/71790) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* When retrieving data directly from a dictionary using Dictionary storage, dictionary table function, or direct SELECT from the dictionary itself, it is now enough to have `SELECT` permission or `dictGet` permission for the dictionary. This aligns with previous attempts to prevent ACL bypasses: https://github.com/ClickHouse/ClickHouse/pull/57362 and https://github.com/ClickHouse/ClickHouse/pull/65359. It also makes the latter one backward compatible. [#72051](https://github.com/ClickHouse/ClickHouse/pull/72051) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Experimental feature
* Implement `allowed_feature_tier` as a global switch to disable all experimental / beta features. [#71841](https://github.com/ClickHouse/ClickHouse/pull/71841) ([Raúl Marín](https://github.com/Algunenano)).
* Fix possible error `No such file or directory` due to unescaped special symbols in files for JSON subcolumns. [#71182](https://github.com/ClickHouse/ClickHouse/pull/71182) ([Pavel Kruglov](https://github.com/Avogar)).
* Support ALTER from String to JSON. This PR also changes the serialization of the JSON and Dynamic types to a new version, V2. The old version, V1, can still be used by enabling the setting `merge_tree_use_v1_object_and_dynamic_serialization` (useful during an upgrade, to be able to roll back the version without issues). [#70442](https://github.com/ClickHouse/ClickHouse/pull/70442) ([Pavel Kruglov](https://github.com/Avogar)).
* Implement a simple CAST from Map/Tuple/Object to the new JSON type through serialization/deserialization via a JSON string (see the sketch after this list). [#71320](https://github.com/ClickHouse/ClickHouse/pull/71320) ([Pavel Kruglov](https://github.com/Avogar)).
* Don't allow Variant/Dynamic types in ORDER BY/GROUP BY/PARTITION BY/PRIMARY KEY by default because it may lead to unexpected results. [#69731](https://github.com/ClickHouse/ClickHouse/pull/69731) ([Pavel Kruglov](https://github.com/Avogar)).
* Forbid Dynamic/Variant types in min/max functions to avoid confusion. [#71761](https://github.com/ClickHouse/ClickHouse/pull/71761) ([Pavel Kruglov](https://github.com/Avogar)).
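As referenced in the CAST item above, a minimal sketch with a 24.11-era binary; the experimental JSON type gate is passed as a command-line setting:

```bash
# Cast a Map to the new JSON type; the output column j has type JSON.
clickhouse-local --allow_experimental_json_type=1 \
    --query "SELECT CAST(map('a', 42) AS JSON) AS j, toTypeName(j)"
```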
#### New Feature
* A new data type, `BFloat16`, represents 16-bit floating point numbers with an 8-bit exponent, a sign bit, and a 7-bit mantissa (see the sketch after this list). This closes [#44206](https://github.com/ClickHouse/ClickHouse/issues/44206). This closes [#49937](https://github.com/ClickHouse/ClickHouse/issues/49937). [#64712](https://github.com/ClickHouse/ClickHouse/pull/64712) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a `CHECK GRANT` query to check whether the current user/role has been granted a specific privilege and whether the corresponding table/column exists in memory. [#68885](https://github.com/ClickHouse/ClickHouse/pull/68885) ([Unalian](https://github.com/Unalian)).
* Added SQL syntax to describe workload and resource management. https://clickhouse.com/docs/en/operations/workload-scheduling. [#69187](https://github.com/ClickHouse/ClickHouse/pull/69187) ([Sergei Trifonov](https://github.com/serxa)).
* Added a server setting `async_load_system_database` that allows the server to start before the system database is fully loaded. This helps to start ClickHouse faster if there are many system tables. [#69847](https://github.com/ClickHouse/ClickHouse/pull/69847) ([Sergei Trifonov](https://github.com/serxa)).
* Allow each authentication method to have its own expiration date, removing it from the user entity. [#70090](https://github.com/ClickHouse/ClickHouse/pull/70090) ([Arthur Passos](https://github.com/arthurpassos)).
* Push external user roles from the query originator to the other nodes in the cluster. Helpful when only the originator has access to the external authenticator (like LDAP). [#70332](https://github.com/ClickHouse/ClickHouse/pull/70332) ([Andrey Zvonov](https://github.com/zvonand)).
* Added a new header type (`access_header`) for user authentication on S3 endpoints. This allows providing an access header with the lowest priority, which will be overwritten by `access_key_id` from any other source (for example, a table schema or a named collection). [#71011](https://github.com/ClickHouse/ClickHouse/pull/71011) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Initial implementation of settings tiers. [#71145](https://github.com/ClickHouse/ClickHouse/pull/71145) ([Raúl Marín](https://github.com/Algunenano)).
* Add support for the `STALENESS` clause in `ORDER BY ... WITH FILL`. [#71151](https://github.com/ClickHouse/ClickHouse/pull/71151) ([Mikhail Artemenko](https://github.com/Michicosun)).
* Added aliases `anyRespectNulls`, `firstValueRespectNulls`, and `anyValueRespectNulls` for aggregation function `any`. Also added aliases `anyLastRespectNulls` and `lastValueRespectNulls` for aggregation function `anyLast`. This allows using more natural camel-case-only syntax rather than mixed camel-case/underscore syntax, for example: `SELECT anyLastRespectNullsStateIf` instead of `anyLast_respect_nullsStateIf`. [#71403](https://github.com/ClickHouse/ClickHouse/pull/71403) ([Peter Nguyen](https://github.com/petern48)).
* Added the `date_time_utc` configuration parameter, enabling JSON log formatting to use UTC date-time in RFC 3339/ISO 8601 format. [#71560](https://github.com/ClickHouse/ClickHouse/pull/71560) ([Ali](https://github.com/xogoodnow)).
* Optimized memory usage for index granularity values when the granularity is constant for a part. Added an ability to always select constant granularity for a part (setting `use_const_adaptive_granularity`), which helps to ensure that it is always optimized in memory. This helps large workloads (trillions of rows in shared storage) avoid constantly growing memory usage from the metadata (index granularity values) of data parts. [#71786](https://github.com/ClickHouse/ClickHouse/pull/71786) ([Anton Popov](https://github.com/CurtizJ)).
* Add `iceberg[S3;HDFS;Azure]Cluster`, `deltaLakeCluster`, `hudiCluster` table functions. [#72045](https://github.com/ClickHouse/ClickHouse/pull/72045) ([Mikhail Artemenko](https://github.com/Michicosun)).
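A quick way to try the new type from the list above; a sketch that assumes the experimental gate is named `allow_experimental_bfloat16_type` (check `system.settings` if the flag is rejected):

```bash
# BFloat16 keeps only a 7-bit mantissa, so expect the value to be rounded.
clickhouse-local --allow_experimental_bfloat16_type=1 \
    --query "SELECT CAST(3.14159 AS BFloat16) AS x, toTypeName(x)"
```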
#### Performance Improvement
* Input block columns are no longer copied for `join_algorithm='parallel_hash'` when distributing them between threads for parallel processing. [#67782](https://github.com/ClickHouse/ClickHouse/pull/67782) ([Nikita Taranov](https://github.com/nickitat)).
* Optimized the `Replacing` merge algorithm for non-intersecting parts. [#70977](https://github.com/ClickHouse/ClickHouse/pull/70977) ([Anton Popov](https://github.com/CurtizJ)).
* Do not list detached parts from read-only and write-once disks for metrics and `system.detached_parts`. [#71086](https://github.com/ClickHouse/ClickHouse/pull/71086) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not calculate heavy asynchronous metrics by default. The feature was introduced in [#40332](https://github.com/ClickHouse/ClickHouse/issues/40332), but it isn't good to have a heavy background job that is needed for only a single customer. [#71087](https://github.com/ClickHouse/ClickHouse/pull/71087) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve the performance and accuracy of the `system.query_metric_log` collection interval by reducing the critical region. [#71473](https://github.com/ClickHouse/ClickHouse/pull/71473) ([Pablo Marcos](https://github.com/pamarcos)).
#### Improvement
* Higher-order functions with constant arrays and constant captured arguments will return constants. [#58400](https://github.com/ClickHouse/ClickHouse/pull/58400) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Read-in-order optimization via generating virtual rows, so that less data is read during merge sort; this is especially useful when multiple parts exist. [#62125](https://github.com/ClickHouse/ClickHouse/pull/62125) ([Shichao Jin](https://github.com/jsc0218)).
* Query plan step names (`EXPLAIN PLAN json=1`) and pipeline processor names (`EXPLAIN PIPELINE compact=0,graph=1`) now have a unique id as a suffix. This makes it possible to match processor profiler output and OpenTelemetry traces with the explain output. [#63518](https://github.com/ClickHouse/ClickHouse/pull/63518) ([qhsong](https://github.com/qhsong)).
* Added an option to check that an object exists after writing it to Azure Blob Storage; this is controlled by the setting `check_objects_after_upload`. [#64847](https://github.com/ClickHouse/ClickHouse/pull/64847) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)).
* Use `Atomic` database by default in `clickhouse-local`. Address items 1 and 5 from [#50647](https://github.com/ClickHouse/ClickHouse/issues/50647). Closes [#44817](https://github.com/ClickHouse/ClickHouse/issues/44817). [#68024](https://github.com/ClickHouse/ClickHouse/pull/68024) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Exceptions now break the HTTP protocol in order to alert the client about an error. [#68800](https://github.com/ClickHouse/ClickHouse/pull/68800) ([Sema Checherinda](https://github.com/CheSema)).
* Report running DDLWorker hosts by creating `replica_dir` and marking replicas active in DDLWorker. [#69658](https://github.com/ClickHouse/ClickHouse/pull/69658) ([tuanpach](https://github.com/tuanpach)).
* Wait only for active replicas in database `ON CLUSTER` queries when `distributed_ddl_output_mode` is set to one of the `*_only_active` values. [#69660](https://github.com/ClickHouse/ClickHouse/pull/69660) ([tuanpach](https://github.com/tuanpach)).
* Better error handling and cancellation of `ON CLUSTER` backups and restores: if a backup or restore fails on one host, it is cancelled on the other hosts automatically, and no spurious errors are produced because some hosts failed while others continued; likewise, cancelling a backup or restore on one host cancels it on the other hosts. Also fixes issues with `test_disallow_concurrency`, so disabling concurrency works better, and makes backups and restores much more resistant to ZooKeeper disconnects. [#70027](https://github.com/ClickHouse/ClickHouse/pull/70027) ([Vitaly Baranov](https://github.com/vitlibar)).
* Enable `parallel_replicas_local_plan` by default. Building a full-fledged local plan on the query initiator improves parallel replicas performance with less resource consumption and provides opportunities to apply more query optimizations. [#70171](https://github.com/ClickHouse/ClickHouse/pull/70171) ([Igor Nikonov](https://github.com/devcrafter)).
* Add ability to set user/password in http_handlers (for `dynamic_query_handler`/`predefined_query_handler`). [#70725](https://github.com/ClickHouse/ClickHouse/pull/70725) ([Azat Khuzhin](https://github.com/azat)).
* Support `ALTER TABLE ... MODIFY/RESET SETTING ...` for certain settings in storage S3Queue. [#70811](https://github.com/ClickHouse/ClickHouse/pull/70811) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Do not call the object storage API when listing directories, as this may be cost-inefficient. Instead, store the list of filenames in memory. The trade-offs are increased initial load time and the memory required to store the filenames. [#70823](https://github.com/ClickHouse/ClickHouse/pull/70823) ([Julia Kartseva](https://github.com/jkartseva)).
* Add a `--threads` parameter to `clickhouse-compressor`, which allows compressing data in parallel. [#70860](https://github.com/ClickHouse/ClickHouse/pull/70860) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added the ability to reload client certificates in the same way as the procedure for reloading server certificates. [#70997](https://github.com/ClickHouse/ClickHouse/pull/70997) ([Roman Antonov](https://github.com/Romeo58rus)).
* Refactored internal structure of files which work with DataLake Storages. [#71012](https://github.com/ClickHouse/ClickHouse/pull/71012) ([Daniil Ivanik](https://github.com/divanik)).
* Make the Replxx client history size configurable. [#71014](https://github.com/ClickHouse/ClickHouse/pull/71014) ([Jiří Kozlovský](https://github.com/jirislav)).
* Added a setting `prewarm_mark_cache` which enables loading of marks to mark cache on inserts, merges, fetches of parts and on startup of the table. [#71053](https://github.com/ClickHouse/ClickHouse/pull/71053) ([Anton Popov](https://github.com/CurtizJ)).
* Boolean support for the Parquet native reader. [#71055](https://github.com/ClickHouse/ClickHouse/pull/71055) ([Arthur Passos](https://github.com/arthurpassos)).
* Retry more errors when interacting with S3, such as "Malformed message". [#71088](https://github.com/ClickHouse/ClickHouse/pull/71088) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Lower log level for some messages about S3. [#71090](https://github.com/ClickHouse/ClickHouse/pull/71090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support writing HDFS files with spaces in the path. [#71105](https://github.com/ClickHouse/ClickHouse/pull/71105) ([exmy](https://github.com/exmy)).
* Added settings limiting the number of replicated tables, dictionaries and views. [#71179](https://github.com/ClickHouse/ClickHouse/pull/71179) ([Kirill](https://github.com/kirillgarbar)).
* Use `AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE` instead of `AWS_CONTAINER_AUTHORIZATION_TOKEN` if the former is available. Fixes [#71074](https://github.com/ClickHouse/ClickHouse/issues/71074). [#71269](https://github.com/ClickHouse/ClickHouse/pull/71269) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Remove the metadata_version ZooKeeper node creation from RMT restarting thread. The only scenario where we need to create this node is when the user updated from a version earlier than 20.4 straight to one later than 24.10. ClickHouse does not support upgrades that span more than a year, so we should throw an exception and ask the user to update gradually, instead of creating the node. [#71385](https://github.com/ClickHouse/ClickHouse/pull/71385) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Add per host dashboards `Overview (host)` and `Cloud overview (host)` to advanced dashboard. [#71422](https://github.com/ClickHouse/ClickHouse/pull/71422) ([alesapin](https://github.com/alesapin)).
* The methods `removeObject` and `removeObjects` are not idempotent. When retries happen due to network errors, the result could be `object not found` because it has been deleted at previous attempts. [#71529](https://github.com/ClickHouse/ClickHouse/pull/71529) ([Sema Checherinda](https://github.com/CheSema)).
* Added new functions `parseDateTime64`, `parseDateTime64OrNull` and `parseDateTime64OrZero`. Compared to the existing function `parseDateTime` (and variants), they return a value of type `DateTime64` instead of `DateTime`. [#71581](https://github.com/ClickHouse/ClickHouse/pull/71581) ([kevinyhzou](https://github.com/KevinyhZou)).
* Allow using `clickhouse` with a file argument, as with `--queries-file`. [#71589](https://github.com/ClickHouse/ClickHouse/pull/71589) ([Raúl Marín](https://github.com/Algunenano)).
* Shrink the `index_granularity` array in memory to fit, to reduce the memory footprint of the MergeTree table engine family. [#71595](https://github.com/ClickHouse/ClickHouse/pull/71595) ([alesapin](https://github.com/alesapin)).
* `clickhouse-local` uses an implicit SELECT by default, which allows using it as a calculator (see the sketch after this list). Improve the syntax highlighting for the implicit SELECT mode. [#71620](https://github.com/ClickHouse/ClickHouse/pull/71620) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The command line applications will highlight syntax even for multi-statements. [#71622](https://github.com/ClickHouse/ClickHouse/pull/71622) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Command-line applications will return non-zero exit codes on errors. In previous versions, the `disks` application returned zero on errors, and other applications returned zero for errors 256 (`PARTITION_ALREADY_EXISTS`) and 512 (`SET_NON_GRANTED_ROLE`). [#71623](https://github.com/ClickHouse/ClickHouse/pull/71623) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* When a user/group was given as an ID, `clickhouse su` failed. This patch fixes it to accept `UID:GID` as well. [#71626](https://github.com/ClickHouse/ClickHouse/pull/71626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* The `Vertical` format (which is also activated when you end your query with `\G`) gets the features of the Pretty formats, such as highlighting thousand groups in numbers and printing a readable number tip. [#71630](https://github.com/ClickHouse/ClickHouse/pull/71630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow disabling the memory buffer increase for the filesystem cache via the setting `filesystem_cache_prefer_bigger_buffer_size`. [#71640](https://github.com/ClickHouse/ClickHouse/pull/71640) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a separate setting `background_download_max_file_segment_size` to control the maximum file segment size for background downloads in the filesystem cache. [#71648](https://github.com/ClickHouse/ClickHouse/pull/71648) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Changes the default value of `enable_http_compression` from 0 to 1. Closes [#71591](https://github.com/ClickHouse/ClickHouse/issues/71591). [#71774](https://github.com/ClickHouse/ClickHouse/pull/71774) ([Peter Nguyen](https://github.com/petern48)).
* Slightly better JSON type parsing: if the current block for a JSON path contains values of several types, try to choose the best type by trying the types in a special best-effort order. [#71785](https://github.com/ClickHouse/ClickHouse/pull/71785) ([Pavel Kruglov](https://github.com/Avogar)).
* Previously, reading from `system.asynchronous_metrics` would wait for a concurrent update to finish, which can take a long time when the system is under heavy load. With this change, the previously collected values can always be read. [#71798](https://github.com/ClickHouse/ClickHouse/pull/71798) ([Alexander Gololobov](https://github.com/davenger)).
* Set `polling_max_timeout_ms` to 10 minutes, `polling_backoff_ms` to 30 seconds. [#71817](https://github.com/ClickHouse/ClickHouse/pull/71817) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Turn off the filesystem cache setting `boundary_alignment` for non-disk reads. [#71827](https://github.com/ClickHouse/ClickHouse/pull/71827) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update `HostResolver` 3 times in a `history` period. [#71863](https://github.com/ClickHouse/ClickHouse/pull/71863) ([Sema Checherinda](https://github.com/CheSema)).
* Queries like `SELECT * FROM t LIMIT 1` used to load part indexes even though they were not used. [#71866](https://github.com/ClickHouse/ClickHouse/pull/71866) ([Alexander Gololobov](https://github.com/davenger)).
* `allow_reorder_prewhere_conditions` is on by default even with old compatibility settings. [#71867](https://github.com/ClickHouse/ClickHouse/pull/71867) ([Raúl Marín](https://github.com/Algunenano)).
* Added a dropdown selector to the advanced dashboard HTML page to choose a dashboard from the `system.dashboards` table. [#72081](https://github.com/ClickHouse/ClickHouse/pull/72081) ([Sergei Trifonov](https://github.com/serxa)).
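As mentioned in the `clickhouse-local` item above, the implicit-SELECT mode makes one-liners like this work; a minimal sketch:

```bash
# No explicit SELECT needed in 24.11+: the leading SELECT is implied.
clickhouse-local --query "1 + 2 * 3"   # prints 7
```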
### <a id="2410"></a> ClickHouse release 24.10, 2024-10-31
#### Backward Incompatible Change
@@ -344,7 +434,7 @@
* The system table `text_log` is enabled by default. This is fully compatible with previous versions, but you may notice subtly increased disk usage on the local disk (this system table takes a tiny amount of disk space). [#67428](https://github.com/ClickHouse/ClickHouse/pull/67428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, `arrayWithConstant` can be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix REPLACE modifier formatting (forbid omitting brackets). [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement `Dynamic` type. Now when the limit of dynamic data types is reached new types are not casted to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into `Dynamic` column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement `Dynamic` type. Now when the limit of dynamic data types is reached new types are not cast to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into `Dynamic` column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).
#### New Feature
* Added a new `MergeTree` setting `deduplicate_merge_projection_mode` to control the projections during merges (for specific engines) and `OPTIMIZE DEDUPLICATE` query. Supported options: `throw` (throw an exception in case the projection is not fully supported for *MergeTree engine), `drop` (remove projection during merge if it can't be merged itself consistently) and `rebuild` (rebuild projection from scratch, which is a heavy operation). [#66672](https://github.com/ClickHouse/ClickHouse/pull/66672) ([jsc0218](https://github.com/jsc0218)).

View File

@@ -11,9 +11,9 @@
*
* In contrast to std::bit_cast can cast types of different width.
*
* Note: for signed types of narrower size, the casted result is zero-extended
* Note: for signed types of narrower size, the cast result is zero-extended
* instead of sign-extended as with regular static_cast.
* For example, -1 Int8 (represented as 0xFF) bit_casted to UInt64
* For example, -1 Int8 (represented as 0xFF) bit_cast to UInt64
* gives 255 (represented as 0x00000000000000FF) instead of 0xFFFFFFFFFFFFFFFF
*/
template <typename To, typename From>
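The zero-extension behavior described in the comment can be mimicked with shell arithmetic; a sketch of the same example, masking -1 to its low 8 bits before widening:

```bash
# -1 as Int8 is 0xFF; widening without sign extension yields 255, not UINT64_MAX.
printf '%d\n' $(( -1 & 0xFF ))   # prints 255
```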

View File

@@ -30,8 +30,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <cmath>
#include <cstdint>
#include <cstdio>
double preciseExp10(double x)
{

View File

@@ -337,7 +337,7 @@ struct integer<Bits, Signed>::_impl
/** Here we have to use strict comparison.
* The max_int is 2^64 - 1.
* When casted to floating point type, it will be rounded to the closest representable number,
* When cast to a floating point type, it will be rounded to the closest representable number,
* which is 2^64.
* But 2^64 is not representable in uint64_t,
* so the maximum representable number will be strictly less.
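The rounding described above is easy to observe; a sketch using awk, whose numbers are IEEE doubles:

```bash
# The max uint64 value is not representable as a double and rounds up to 2^64.
awk 'BEGIN { printf "%.1f\n", 18446744073709551615 }'   # prints 18446744073709551616.0
```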

View File

@@ -4,8 +4,9 @@ FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG LLVM_APT_VERSION="1:19.1.4~*"
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=19
RUN apt-get update \
&& apt-get install \
@@ -26,7 +27,7 @@ RUN apt-get update \
&& echo "deb https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
/etc/apt/sources.list \
&& apt-get update \
&& apt-get install --yes --no-install-recommends --verbose-versions llvm-${LLVM_VERSION} \
&& apt-get install --yes --no-install-recommends --verbose-versions llvm-${LLVM_VERSION}>=${LLVM_APT_VERSION} \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
@@ -72,10 +73,6 @@ RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake
# LLVM changes paths for compiler-rt libraries. For some reason clang-18.1.8 cannot catch up libraries from default install path.
# It's very dirty workaround, better to build compiler and LLVM ourself and use it. Details: https://github.com/llvm/llvm-project/issues/95792
RUN test ! -d /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu || ln -s /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu /usr/lib/llvm-18/lib/clang/18/lib/x86_64-unknown-linux-gnu
ARG TARGETARCH
ARG SCCACHE_VERSION=v0.7.7
ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1

View File

@@ -1316,7 +1316,6 @@ bools
boringssl
boundingRatio
bozerkins
broadcasted
brotli
bson
bsoneachrow
@@ -1342,7 +1341,6 @@ cardinalities
cardinality
cartesian
cassandra
casted
catboost
catboostEvaluate
categoricalInformationValue

View File

@@ -2,11 +2,11 @@
# NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54492)
SET(VERSION_REVISION 54493)
SET(VERSION_MAJOR 24)
SET(VERSION_MINOR 11)
SET(VERSION_MINOR 12)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH c82cf25b3e5864bcc153cbe45adb8c6527e1ec6e)
SET(VERSION_DESCRIBE v24.11.1.1-testing)
SET(VERSION_STRING 24.11.1.1)
SET(VERSION_GITHASH e4c9b022237992620c966d032cee495da8d0b5ac)
SET(VERSION_DESCRIBE v24.12.1.1-testing)
SET(VERSION_STRING 24.12.1.1)
# end of autochange

View File

@@ -5,14 +5,14 @@ if (ENABLE_CLANG_TIDY)
find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
if (CLANG_TIDY_CACHE_PATH)
find_program (_CLANG_TIDY_PATH NAMES "clang-tidy-18" "clang-tidy-17" "clang-tidy-16" "clang-tidy")
find_program (_CLANG_TIDY_PATH NAMES "clang-tidy-19" "clang-tidy-18" "clang-tidy-17" "clang-tidy")
# Why do we use ';' here?
# It's a cmake black magic: https://cmake.org/cmake/help/latest/prop_tgt/LANG_CLANG_TIDY.html#prop_tgt:%3CLANG%3E_CLANG_TIDY
# The CLANG_TIDY_PATH is passed to CMAKE_CXX_CLANG_TIDY, which follows CXX_CLANG_TIDY syntax.
set (CLANG_TIDY_PATH "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_PATH}" CACHE STRING "A combined command to run clang-tidy with caching wrapper")
else ()
find_program (CLANG_TIDY_PATH NAMES "clang-tidy-18" "clang-tidy-17" "clang-tidy-16" "clang-tidy")
find_program (CLANG_TIDY_PATH NAMES "clang-tidy-19" "clang-tidy-18" "clang-tidy-17" "clang-tidy")
endif ()
if (CLANG_TIDY_PATH)
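For context, a sketch of how this path is exercised at configure time; the option name matches the `ENABLE_CLANG_TIDY` guard at the top of this file:

```bash
# Configure with clang-tidy enabled; find_program above now prefers clang-tidy-19.
cmake -S . -B build -DENABLE_CLANG_TIDY=1
cmake --build build
```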

View File

@@ -17,9 +17,4 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=bfd")
# Currently, lld does not work with the error:
# ld.lld: error: section size decrease is too large
# But GNU BinUtils work.
set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "Linker name" FORCE)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld")

View File

@@ -57,6 +57,7 @@ endif()
SET(AWS_SDK_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws")
SET(AWS_SDK_CORE_DIR "${AWS_SDK_DIR}/src/aws-cpp-sdk-core")
SET(AWS_SDK_S3_DIR "${AWS_SDK_DIR}/generated/src/aws-cpp-sdk-s3")
SET(AWS_SDK_KMS_DIR "${AWS_SDK_DIR}/generated/src/aws-cpp-sdk-kms")
SET(AWS_AUTH_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-auth")
SET(AWS_CAL_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-cal")
@@ -145,6 +146,17 @@ list(APPEND AWS_SOURCES ${AWS_SDK_S3_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDK_S3_DIR}/include/")
if(CLICKHOUSE_CLOUD)
# aws-cpp-sdk-kms
file(GLOB AWS_SDK_KMS_SRC
"${AWS_SDK_KMS_DIR}/source/*.cpp"
"${AWS_SDK_KMS_DIR}/source/model/*.cpp"
)
list(APPEND AWS_SOURCES ${AWS_SDK_KMS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDK_KMS_DIR}/include/")
endif()
# aws-c-auth
file(GLOB AWS_AUTH_SRC
"${AWS_AUTH_DIR}/source/*.c"

View File

@@ -17,3 +17,4 @@ git config submodule."contrib/protobuf".update '!../sparse-checkout/update-proto
git config submodule."contrib/postgres".update '!../sparse-checkout/update-postgres.sh'
git config submodule."contrib/libxml2".update '!../sparse-checkout/update-libxml2.sh'
git config submodule."contrib/brotli".update '!../sparse-checkout/update-brotli.sh'
git config submodule."contrib/google-cloud-cpp".update '!../sparse-checkout/update-google-cloud-cpp.sh'

View File

@@ -7,6 +7,7 @@ echo '/*' > $FILES_TO_CHECKOUT
echo '!/*/*' >> $FILES_TO_CHECKOUT
echo '/src/aws-cpp-sdk-core/*' >> $FILES_TO_CHECKOUT
echo '/generated/src/aws-cpp-sdk-s3/*' >> $FILES_TO_CHECKOUT
echo '/generated/src/aws-cpp-sdk-kms/*' >> $FILES_TO_CHECKOUT
git config core.sparsecheckout true
git checkout $1

View File

@@ -0,0 +1,18 @@
#!/bin/sh
echo "Using sparse checkout for google-cloud-cpp"
FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
echo '!/*' > $FILES_TO_CHECKOUT
echo '/google/cloud/*.cc' >> $FILES_TO_CHECKOUT
echo '/google/cloud/*.h' >> $FILES_TO_CHECKOUT
echo '/google/cloud/internal/*' >> $FILES_TO_CHECKOUT
echo '/google/cloud/grpc_utils/*' >> $FILES_TO_CHECKOUT
echo '/google/cloud/kms/*' >> $FILES_TO_CHECKOUT
echo '/cmake/*' >> $FILES_TO_CHECKOUT
echo '/protos/*' >> $FILES_TO_CHECKOUT
echo '/external/googleapis' >> $FILES_TO_CHECKOUT
git config core.sparsecheckout true
git checkout $1
git read-tree -mu HEAD
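A sketch of how this script is wired in, mirroring the `git config` line added to the setup script above; with a custom `update` command, `git submodule update` runs the script instead of a plain checkout:

```bash
git config submodule."contrib/google-cloud-cpp".update \
    '!../sparse-checkout/update-google-cloud-cpp.sh'
git submodule update --init contrib/google-cloud-cpp
```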

View File

@@ -38,7 +38,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.10.2.80"
ARG VERSION="24.10.3.21"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

View File

@@ -3,10 +3,10 @@ compilers and build settings. Correctly configured Docker daemon is single depen
Usage:
Build deb package with `clang-18` in `debug` mode:
Build deb package with `clang-19` in `debug` mode:
```
$ mkdir deb/test_output
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-18 --debug-build
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-19 --debug-build
$ ls -l deb/test_output
-rw-r--r-- 1 root root 3730 clickhouse-client_22.2.2+debug_all.deb
-rw-r--r-- 1 root root 84221888 clickhouse-common-static_22.2.2+debug_amd64.deb
@@ -17,11 +17,11 @@ $ ls -l deb/test_output
```
Build ClickHouse binary with `clang-18` and `address` sanitizer in `relwithdebuginfo`
Build ClickHouse binary with `clang-19` and `address` sanitizer in `relwithdebuginfo`
mode:
```
$ mkdir $HOME/some_clickhouse
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-18 --sanitizer=address
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-19 --sanitizer=address
$ ls -l $HOME/some_clickhouse
-rwxr-xr-x 1 root root 787061952 clickhouse
lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse

View File

@@ -407,20 +407,20 @@ def parse_args() -> argparse.Namespace:
parser.add_argument(
"--compiler",
choices=(
"clang-18",
"clang-18-darwin",
"clang-18-darwin-aarch64",
"clang-18-aarch64",
"clang-18-aarch64-v80compat",
"clang-18-ppc64le",
"clang-18-riscv64",
"clang-18-s390x",
"clang-18-loongarch64",
"clang-18-amd64-compat",
"clang-18-amd64-musl",
"clang-18-freebsd",
"clang-19",
"clang-19-darwin",
"clang-19-darwin-aarch64",
"clang-19-aarch64",
"clang-19-aarch64-v80compat",
"clang-19-ppc64le",
"clang-19-riscv64",
"clang-19-s390x",
"clang-19-loongarch64",
"clang-19-amd64-compat",
"clang-19-amd64-musl",
"clang-19-freebsd",
),
default="clang-18",
default="clang-19",
help="a compiler to use",
)
parser.add_argument(

View File

@@ -35,7 +35,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.10.2.80"
ARG VERSION="24.10.3.21"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

View File

@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.10.2.80"
ARG VERSION="24.10.3.21"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
#docker-official-library:off

View File

@@ -40,10 +40,6 @@ RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake
# LLVM changes paths for compiler-rt libraries. For some reason clang-18.1.8 cannot catch up libraries from default install path.
# It's very dirty workaround, better to build compiler and LLVM ourself and use it. Details: https://github.com/llvm/llvm-project/issues/95792
RUN test ! -d /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu || ln -s /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu /usr/lib/llvm-18/lib/clang/18/lib/x86_64-unknown-linux-gnu
ARG CCACHE_VERSION=4.6.1
RUN mkdir /tmp/ccache \
&& cd /tmp/ccache \

View File

@@ -27,7 +27,6 @@ pandas==1.5.3
pip==24.1.1
pipdeptree==2.23.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
python-dateutil==2.9.0.post0
pytz==2024.1
requests==2.32.3

View File

@@ -18,7 +18,6 @@ pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0

View File

@@ -17,7 +17,7 @@ stage=${stage:-}
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo "$script_dir"
repo_dir=ch
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-18_debug_none_unsplitted_disable_False_binary"}
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-19_debug_none_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
function git_clone_with_retry

View File

@@ -17,7 +17,6 @@ pipdeptree==2.23.0
pycurl==7.45.3
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0

View File

@@ -2,7 +2,7 @@
set -euo pipefail
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-18_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-19_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}

View File

@@ -18,7 +18,6 @@ pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0

View File

@@ -19,7 +19,6 @@ pipdeptree==2.23.0
Pygments==2.11.2
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
pytz==2023.4
PyYAML==6.0.1
scipy==1.12.0

View File

@@ -2,7 +2,7 @@
set -euo pipefail
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-18_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-19_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}

View File

@@ -20,7 +20,6 @@ pipdeptree==2.23.0
PyJWT==2.3.0
pyodbc==5.1.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0

View File

@@ -17,7 +17,6 @@ pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
pytz==2024.1
PyYAML==6.0.1
SecretStorage==3.3.1

View File

@@ -6,7 +6,7 @@ set -e
set -u
set -o pipefail
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-18_debug_none_unsplitted_disable_False_binary"}
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-19_debug_none_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
function wget_with_retry

View File

@@ -34,7 +34,6 @@ pyarrow==15.0.0
pyasn1==0.4.8
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
python-dateutil==2.8.1
pytz==2024.1
PyYAML==6.0.1

View File

@@ -4,8 +4,9 @@ FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG LLVM_APT_VERSION="1:19.1.4~*"
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=19
RUN apt-get update \
&& apt-get install \
@@ -28,7 +29,7 @@ RUN apt-get update \
&& echo "deb https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
/etc/apt/sources.list \
&& apt-get update \
&& apt-get install --yes --no-install-recommends --verbose-versions llvm-${LLVM_VERSION} \
&& apt-get install --yes --no-install-recommends --verbose-versions llvm-${LLVM_VERSION}>=${LLVM_APT_VERSION} \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

View File

@@ -56,7 +56,7 @@ sidebar_label: 2023
#### Improvement
* This is the second part of Kusto Query Language dialect support. [Phase 1 implementation ](https://github.com/ClickHouse/ClickHouse/pull/37961) has been merged. [#42510](https://github.com/ClickHouse/ClickHouse/pull/42510) ([larryluogit](https://github.com/larryluogit)).
* Op processors IDs are raw ptrs casted to UInt64. Print it in a prettier manner:. [#48852](https://github.com/ClickHouse/ClickHouse/pull/48852) ([Vlad Seliverstov](https://github.com/behebot)).
* Op processors IDs are raw ptrs cast to UInt64. Print it in a prettier manner:. [#48852](https://github.com/ClickHouse/ClickHouse/pull/48852) ([Vlad Seliverstov](https://github.com/behebot)).
* Creating a direct dictionary with a lifetime field set will be rejected at create time. Fixes: [#27861](https://github.com/ClickHouse/ClickHouse/issues/27861). [#49043](https://github.com/ClickHouse/ClickHouse/pull/49043) ([Rory Crispin](https://github.com/RoryCrispin)).
* Allow parameters in queries with partitions like `ALTER TABLE t DROP PARTITION`. Closes [#49449](https://github.com/ClickHouse/ClickHouse/issues/49449). [#49516](https://github.com/ClickHouse/ClickHouse/pull/49516) ([Nikolay Degterinsky](https://github.com/evillique)).
* 1.Refactor the code about zookeeper_connection 2.Add a new column xid for zookeeper_connection. [#50702](https://github.com/ClickHouse/ClickHouse/pull/50702) ([helifu](https://github.com/helifu)).

View File

@@ -73,7 +73,7 @@ sidebar_label: 2023
* ``` sumIf(123, cond) -> 123 * countIf(1, cond) sum(if(cond, 123, 0)) -> 123 * countIf(cond) sum(if(cond, 0, 123)) -> 123 * countIf(not(cond)) ```. [#44728](https://github.com/ClickHouse/ClickHouse/pull/44728) ([李扬](https://github.com/taiyang-li)).
* Optimize behavior for a replica delay api logic in case the replica is read-only. [#45148](https://github.com/ClickHouse/ClickHouse/pull/45148) ([mateng915](https://github.com/mateng0915)).
* Introduce gwp-asan implemented by llvm runtime. This closes [#27039](https://github.com/ClickHouse/ClickHouse/issues/27039). [#45226](https://github.com/ClickHouse/ClickHouse/pull/45226) ([Han Fei](https://github.com/hanfei1991)).
* ... in the case key casted from uint64 to uint32, small impact for little endian platform but key value becomes zero in big endian case. ### Documentation entry for user-facing changes. [#45375](https://github.com/ClickHouse/ClickHouse/pull/45375) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* ... in the case key cast from uint64 to uint32, small impact for little endian platform but key value becomes zero in big endian case. ### Documentation entry for user-facing changes. [#45375](https://github.com/ClickHouse/ClickHouse/pull/45375) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Mark Gorilla compression on columns of non-Float* type as suspicious. [#45376](https://github.com/ClickHouse/ClickHouse/pull/45376) ([Robert Schulze](https://github.com/rschu1ze)).
* Allow removing redundant aggregation keys with constants (e.g., simplify `GROUP BY a, a + 1` to `GROUP BY a`). [#45415](https://github.com/ClickHouse/ClickHouse/pull/45415) ([Dmitry Novik](https://github.com/novikd)).
* Show replica name that is executing a merge in the postpone_reason. [#45458](https://github.com/ClickHouse/ClickHouse/pull/45458) ([Frank Chen](https://github.com/FrankChen021)).

View File

@@ -15,7 +15,7 @@ sidebar_label: 2024
* The system table `text_log` is enabled by default. This is fully compatible with previous versions, but you may notice subtly increased disk usage on the local disk (this system table takes a tiny amount of disk space). [#67428](https://github.com/ClickHouse/ClickHouse/pull/67428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, `arrayWithConstant` can be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix REPLACE modifier formatting (forbid omitting brackets). [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement Dynamic type. Now when the limit of dynamic data types is reached new types are not casted to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into Dynamic column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement Dynamic type. Now when the limit of dynamic data types is reached new types are not cast to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into Dynamic column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).
#### New Feature
* Add new experimental Kafka storage engine to store offsets in Keeper instead of relying on committing them to Kafka. [#57625](https://github.com/ClickHouse/ClickHouse/pull/57625) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).

View File

@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.8.8.17-lts (81036bd118b) FIXME as compared to v24.8.7.41-lts (e28553d4f2b)
#### Improvement
* Backported in [#72060](https://github.com/ClickHouse/ClickHouse/issues/72060): When retrieving data directly from a dictionary using Dictionary storage, dictionary table function, or direct SELECT from the dictionary itself, it is now enough to have `SELECT` permission or `dictGet` permission for the dictionary. This aligns with previous attempts to prevent ACL bypasses: https://github.com/ClickHouse/ClickHouse/pull/57362 and https://github.com/ClickHouse/ClickHouse/pull/65359. It also makes the latter one backward compatible. [#72051](https://github.com/ClickHouse/ClickHouse/pull/72051) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#71981](https://github.com/ClickHouse/ClickHouse/issues/71981): After [this issue](https://github.com/ClickHouse/ClickHouse/pull/59946#issuecomment-1943653197) there are quite a few table replicas in production such that their `metadata_version` node value is both equal to `0` and is different from the respective table's `metadata` node version. This leads to `alter` queries failing on such replicas. [#69274](https://github.com/ClickHouse/ClickHouse/pull/69274) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Backported in [#72142](https://github.com/ClickHouse/ClickHouse/issues/72142): Acquire a zero-copy shared lock before moving a part to a zero-copy disk, to prevent possible data loss if Keeper is unavailable. [#71845](https://github.com/ClickHouse/ClickHouse/pull/71845) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#72038](https://github.com/ClickHouse/ClickHouse/issues/72038): Fix an exception for `toDayOfWeek` in a WHERE condition on a primary key of DateTime64 type. [#71849](https://github.com/ClickHouse/ClickHouse/pull/71849) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#72032](https://github.com/ClickHouse/ClickHouse/issues/72032): Fix `Illegal type` error for `MergeTree` tables with binary monotonic function in `ORDER BY` when the first argument is constant. Fixes [#71941](https://github.com/ClickHouse/ClickHouse/issues/71941). [#71966](https://github.com/ClickHouse/ClickHouse/pull/71966) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#72155](https://github.com/ClickHouse/ClickHouse/issues/72155): Allow only SELECT queries in EXPLAIN AST used inside subquery. Other types of queries lead to logical error: 'Bad cast from type DB::ASTCreateQuery to DB::ASTSelectWithUnionQuery' or `Inconsistent AST formatting`. [#71982](https://github.com/ClickHouse/ClickHouse/pull/71982) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#72114](https://github.com/ClickHouse/ClickHouse/issues/72114): Fix formatting of `MOVE PARTITION ... TO TABLE ...` alter commands when `format_alter_commands_with_parentheses` is enabled. [#72080](https://github.com/ClickHouse/ClickHouse/pull/72080) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#72067](https://github.com/ClickHouse/ClickHouse/issues/72067): Fix client syntax highlighting that was broken in https://github.com/ClickHouse/ClickHouse/pull/71949. [#72049](https://github.com/ClickHouse/ClickHouse/pull/72049) ([Nikolay Degterinsky](https://github.com/evillique)).

View File

@ -10,7 +10,7 @@ sidebar_label: 2024
#### Backward Incompatible Change
* Allow writing `SETTINGS` before `FORMAT` in a chain of queries with `UNION` when subqueries are inside parentheses. This closes [#39712](https://github.com/ClickHouse/ClickHouse/issues/39712). This changes the behavior when a query has the SETTINGS clause specified twice in a sequence: the closest SETTINGS clause now takes precedence for the corresponding subquery. In previous versions, the outermost SETTINGS clause could take precedence over the inner one. [#60197](https://github.com/ClickHouse/ClickHouse/pull/60197) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not allow explicitly specifying UUID when creating a table in Replicated database. Also, do not allow explicitly specifying ZooKeeper path and replica name for *MergeTree tables in Replicated databases. [#66104](https://github.com/ClickHouse/ClickHouse/pull/66104) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Reimplement Dynamic type. Now when the limit of dynamic data types is reached new types are not casted to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into Dynamic column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Pavel Kruglov](https://github.com/Avogar)).
* Reimplement Dynamic type. Now when the limit of dynamic data types is reached new types are not cast to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into Dynamic column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Pavel Kruglov](https://github.com/Avogar)).
* Expressions like `a[b].c` are supported for named tuples, as well as named subscripts from arbitrary expressions, e.g., `expr().name`. This is useful for processing JSON. This closes [#54965](https://github.com/ClickHouse/ClickHouse/issues/54965). In previous versions, an expression of form `expr().name` was parsed as `tupleElement(expr(), name)`, and the query analyzer was searching for a column `name` rather than for the corresponding tuple element; while in the new version, it is changed to `tupleElement(expr(), 'name')`. In most cases, the previous version was not working, but it is possible to imagine a very unusual scenario when this change could lead to incompatibility: if you stored names of tuple elements in a column or an alias, that was named differently than the tuple element's name: `SELECT 'b' AS a, CAST([tuple(123)] AS 'Array(Tuple(b UInt8))') AS t, t[1].a`. It is very unlikely that you used such queries, but we still have to mark this change as potentially backward incompatible. [#68435](https://github.com/ClickHouse/ClickHouse/pull/68435) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* When the setting `print_pretty_type_names` is enabled, it will print `Tuple` data type in a pretty form in `SHOW CREATE TABLE` statements, `formatQuery` function, and in the interactive mode in `clickhouse-client` and `clickhouse-local`. In previous versions, this setting was only applied to `DESCRIBE` queries and `toTypeName`. This closes [#65753](https://github.com/ClickHouse/ClickHouse/issues/65753). [#68492](https://github.com/ClickHouse/ClickHouse/pull/68492) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

View File

@ -11,7 +11,7 @@ This is for the case when you have Linux machine and want to use it to build `cl
The cross-build for LoongArch64 is based on the [Build instructions](../development/build.md), follow them first.
## Install Clang-18
## Install Clang-19
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do
```
@ -21,11 +21,11 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
## Build ClickHouse {#build-clickhouse}
The llvm version required for building must be greater than or equal to 18.1.0.
The llvm version required for building must be greater than or equal to 19.1.0.
``` bash
cd ClickHouse
mkdir build-loongarch64
CC=clang-18 CXX=clang++-18 cmake . -Bbuild-loongarch64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-loongarch64.cmake
CC=clang-19 CXX=clang++-19 cmake . -Bbuild-loongarch64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-loongarch64.cmake
ninja -C build-loongarch64
```

View File

@ -13,14 +13,14 @@ The cross-build for macOS is based on the [Build instructions](../development/bu
The following sections provide a walk-through for building ClickHouse for `x86_64` macOS. If you're targeting ARM architecture, simply substitute all occurrences of `x86_64` with `aarch64`. For example, replace `x86_64-apple-darwin` with `aarch64-apple-darwin` throughout the steps.
## Install clang-18
## Install clang-19
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example, the commands for Bionic look like this:
``` bash
sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-17 main" >> /etc/apt/sources.list
sudo apt-get install clang-18
sudo apt-get install clang-19
```
## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
@ -59,7 +59,7 @@ curl -L 'https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11
cd ClickHouse
mkdir build-darwin
cd build-darwin
CC=clang-18 CXX=clang++-18 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
CC=clang-19 CXX=clang++-19 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
ninja
```

View File

@ -11,7 +11,7 @@ This is for the case when you have Linux machine and want to use it to build `cl
The cross-build for RISC-V 64 is based on the [Build instructions](../development/build.md), follow them first.
## Install Clang-18
## Install Clang-19
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do
```
@ -23,7 +23,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
``` bash
cd ClickHouse
mkdir build-riscv64
CC=clang-18 CXX=clang++-18 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
CC=clang-19 CXX=clang++-19 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
ninja -C build-riscv64
```

View File

@ -54,8 +54,8 @@ to see what version you have installed before setting this environment variable.
:::
``` bash
export CC=clang-18
export CXX=clang++-18
export CC=clang-19
export CXX=clang++-19
```
### Install Rust compiler
@ -109,7 +109,7 @@ The build requires the following components:
- Git (used to checkout the sources, not needed for the build)
- CMake 3.20 or newer
- Compiler: clang-18 or newer
- Compiler: clang-19 or newer
- Linker: lld-17 or newer
- Ninja
- Yasm

View File

@ -156,7 +156,7 @@ Builds ClickHouse in various configurations for use in further steps. You have t
### Report Details
- **Compiler**: `clang-18`, optionally with the name of a target platform
- **Compiler**: `clang-19`, optionally with the name of a target platform
- **Build type**: `Debug` or `RelWithDebInfo` (cmake).
- **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
- **Status**: `success` or `fail`
@ -180,7 +180,7 @@ Performs static analysis and code style checks using `clang-tidy`. The report is
There is a convenience `packager` script that runs the clang-tidy build in docker
```sh
mkdir build_tidy
./docker/packager/packager --output-dir=./build_tidy --package-type=binary --compiler=clang-18 --debug-build --clang-tidy
./docker/packager/packager --output-dir=./build_tidy --package-type=binary --compiler=clang-19 --debug-build --clang-tidy
```

View File

@ -121,7 +121,7 @@ While inside the `build` directory, configure your build by running CMake. Befor
export CC=clang CXX=clang++
cmake ..
If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-18 CXX=clang++-18`. The clang version will be in the script output.
If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-19 CXX=clang++-19`. The clang version will be in the script output.
The `CC` variable specifies the compiler for C (short for C Compiler), and the `CXX` variable specifies which C++ compiler is to be used for building.

View File

@ -64,7 +64,7 @@ Result:
## Converting Tuple to Map
Values of type `Tuple()` can be casted to values of type `Map()` using function [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast):
Values of type `Tuple()` can be cast to values of type `Map()` using function [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast):
**Example**
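A minimal illustration, assuming a tuple of exactly two arrays (keys and values):

```sql
-- Cast a (keys, values) tuple of arrays to a Map
SELECT CAST(([1, 2, 3], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map;
```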

View File

@ -187,7 +187,7 @@ select json.a.g.:Float64, dynamicType(json.a.g), json.d.:Date, dynamicType(json.
└─────────────────────┴───────────────────────┴────────────────┴─────────────────────┘
```
`Dynamic` subcolumns can be casted to any data type. In this case the exception will be thrown if internal type inside `Dynamic` cannot be casted to the requested type:
`Dynamic` subcolumns can be cast to any data type. In this case, an exception will be thrown if the internal type inside `Dynamic` cannot be cast to the requested type:
```sql
select json.a.g::UInt64 as uint FROM test;

View File

@ -8,7 +8,7 @@ sidebar_label: Arithmetic
Arithmetic functions work for any two operands of type `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, `Float32`, or `Float64`.
Before performing the operation, both operands are casted to the result type. The result type is determined as follows (unless specified
Before performing the operation, both operands are cast to the result type. The result type is determined as follows (unless specified
differently in the function documentation below):
- If both operands are up to 32 bits wide, the size of the result type will be the size of the next bigger type following the bigger of the
two operands (integer size promotion). For example, `UInt8 + UInt16 = UInt32` or `Float32 * Float32 = Float64`.
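The promotion rule can be checked directly with `toTypeName` (an illustrative query, not part of the rule itself):

```sql
-- UInt8 + UInt16 promotes to UInt32; Float32 * Float32 promotes to Float64
SELECT
    toTypeName(toUInt8(1) + toUInt16(1))    AS int_promotion,
    toTypeName(toFloat32(1) * toFloat32(1)) AS float_promotion;
```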

View File

@ -468,7 +468,7 @@ SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}') = 2
### JSONType
Return the type of a JSON value. If the value does not exist, `Null` will be returned.
Return the type of a JSON value. If the value does not exist, `Null=0` will be returned (not the usual [Null](../data-types/nullable.md), but the `Null=0` value of `Enum8('Null' = 0, 'String' = 34, ...)`).
**Syntax**
@ -488,7 +488,7 @@ JSONType(json [, indices_or_keys]...)
**Returned value**
- Returns the type of a JSON value as a string, otherwise, if the value doesn't exist, it returns `Null`. [String](../data-types/string.md).
- Returns the type of a JSON value as a string, otherwise, if the value doesn't exist, it returns `Null=0`. [Enum](../data-types/enum.md).
**Examples**
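A short sketch of typical return values (the JSON literal here is illustrative):

```sql
SELECT
    JSONType('{"a": "hello", "b": [-100, 200.0, 300]}')      AS root, -- 'Object'
    JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') AS str,  -- 'String'
    JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') AS arr;  -- 'Array'
```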
@ -520,7 +520,7 @@ JSONExtractUInt(json [, indices_or_keys]...)
**Returned value**
- Returns a UInt value if it exists, otherwise it returns `Null`. [UInt64](../data-types/string.md).
- Returns a UInt value if it exists, otherwise it returns `0`. [UInt64](../data-types/int-uint.md).
**Examples**
@ -560,7 +560,7 @@ JSONExtractInt(json [, indices_or_keys]...)
**Returned value**
- Returns an Int value if it exists, otherwise it returns `Null`. [Int64](../data-types/int-uint.md).
- Returns an Int value if it exists, otherwise it returns `0`. [Int64](../data-types/int-uint.md).
**Examples**
@ -600,7 +600,7 @@ JSONExtractFloat(json [, indices_or_keys]...)
**Returned value**
- Returns a Float value if it exists, otherwise it returns `Null`. [Float64](../data-types/float.md).
- Returns a Float value if it exists, otherwise it returns `0`. [Float64](../data-types/float.md).
**Examples**
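The zero-default behavior of the extraction functions can be seen in one illustrative query (the JSON literals are hypothetical):

```sql
SELECT
    JSONExtractUInt('{"a": 42}', 'a')   AS existing, -- 42
    JSONExtractUInt('{"a": 42}', 'b')   AS missing,  -- 0, not Null
    JSONExtractFloat('{"x": 3.5}', 'x') AS flt;      -- 3.5
```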

View File

@ -85,7 +85,7 @@ Result:
└───────────────────────────────────────────┘
```
`mapFromArrays` also accepts arguments of type [Map](../data-types/map.md). These are casted to array of tuples during execution.
`mapFromArrays` also accepts arguments of type [Map](../data-types/map.md). These are cast to array of tuples during execution.
```sql
SELECT mapFromArrays([1, 2, 3], map('a', 1, 'b', 2, 'c', 3))

View File

@ -6257,7 +6257,7 @@ Code: 70. DB::Exception: Received from localhost:9000. DB::Exception: Value in c
## accurateCastOrNull(x, T)
Converts input value `x` to the specified data type `T`. Always returns [Nullable](../data-types/nullable.md) type and returns [NULL](../syntax.md/#null-literal) if the casted value is not representable in the target type.
Converts input value `x` to the specified data type `T`. Always returns a [Nullable](../data-types/nullable.md) type and returns [NULL](../syntax.md/#null-literal) if the cast value is not representable in the target type.
**Syntax**
@ -6310,7 +6310,7 @@ Result:
## accurateCastOrDefault(x, T[, default_value])
Converts input value `x` to the specified data type `T`. Returns default type value or `default_value` if specified if the casted value is not representable in the target type.
Converts input value `x` to the specified data type `T`. Returns the default type value, or `default_value` if specified, when the cast value is not representable in the target type.
**Syntax**
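To contrast the two functions, a brief sketch of the documented overflow handling:

```sql
SELECT
    accurateCastOrNull(-1, 'UInt8')       AS as_null,    -- NULL: -1 is not representable in UInt8
    accurateCastOrDefault(-1, 'UInt8')    AS as_default, -- 0: the default value of UInt8
    accurateCastOrDefault(-1, 'UInt8', 5) AS as_custom;  -- 5: the explicitly provided default
```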

View File

@ -21,4 +21,4 @@ Queries will add or remove metadata about constraints from table, so they are pr
Constraint checks **will not be executed** on existing data when a constraint is added.
:::
All changes on replicated tables are broadcasted to ZooKeeper and will be applied on other replicas as well.
All changes on replicated tables are broadcast to ZooKeeper and will be applied on other replicas as well.
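For example, a constraint added on one replica propagates to the others (a sketch; the table `t` and the constraint name are hypothetical):

```sql
-- Metadata-only operation; existing rows are not re-checked
ALTER TABLE t ADD CONSTRAINT c_positive CHECK value > 0;

-- The constraint can later be dropped the same way
ALTER TABLE t DROP CONSTRAINT c_positive;
```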

View File

@ -131,8 +131,8 @@ For the query to run successfully, the following conditions must be met:
- Both tables must have the same structure.
- Both tables must have the same partition key, the same order by key and the same primary key.
- Both tables must have the same indices and projections.
- Both tables must have the same storage policy.
- The destination table must include all indices and projections from the source table. If the `enforce_index_structure_match_on_partition_manipulation` setting is enabled in the destination table, the indices and projections must be identical. Otherwise, the destination table can have a superset of the source table's indices and projections.
## REPLACE PARTITION
@ -151,8 +151,8 @@ For the query to run successfully, the following conditions must be met:
- Both tables must have the same structure.
- Both tables must have the same partition key, the same order by key and the same primary key.
- Both tables must have the same indices and projections.
- Both tables must have the same storage policy.
- The destination table must include all indices and projections from the source table. If the `enforce_index_structure_match_on_partition_manipulation` setting is enabled in the destination table, the indices and projections must be identical. Otherwise, the destination table can have a superset of the source table's indices and projections.
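Assuming both tables satisfy the conditions above, a replace is a single statement (the table names here are hypothetical):

```sql
-- Copy partition 202401 from t_staging into t_main, replacing any existing data for that partition
ALTER TABLE t_main REPLACE PARTITION 202401 FROM t_staging;
```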
## MOVE PARTITION TO TABLE
@ -166,9 +166,9 @@ For the query to run successfully, the following conditions must be met:
- Both tables must have the same structure.
- Both tables must have the same partition key, the same order by key and the same primary key.
- Both tables must have the same indices and projections.
- Both tables must have the same storage policy.
- Both tables must be the same engine family (replicated or non-replicated).
- The destination table must include all indices and projections from the source table. If the `enforce_index_structure_match_on_partition_manipulation` setting is enabled in the destination table, the indices and projections must be identical. Otherwise, the destination table can have a superset of the source table's indices and projections.
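With the conditions above satisfied, the move is likewise a single statement (the table names are hypothetical):

```sql
-- Move partition 202401 out of t_main into t_archive; the data is removed from t_main
ALTER TABLE t_main MOVE PARTITION 202401 TO TABLE t_archive;
```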
## CLEAR COLUMN IN PARTITION

View File

@ -16,7 +16,7 @@ Manipulates data matching the specified filtering expression. Implemented as a [
The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that, unlike similar queries in OLTP databases, this is a heavy operation not designed for frequent use.
:::
The `filter_expr` must be of type `UInt8`. This query updates values of specified columns to the values of corresponding expressions in rows for which the `filter_expr` takes a non-zero value. Values are casted to the column type using the `CAST` operator. Updating columns that are used in the calculation of the primary or the partition key is not supported.
The `filter_expr` must be of type `UInt8`. This query updates values of specified columns to the values of corresponding expressions in rows for which the `filter_expr` takes a non-zero value. Values are cast to the column type using the `CAST` operator. Updating columns that are used in the calculation of the primary or the partition key is not supported.
One query can contain several commands separated by commas.
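A minimal sketch, assuming a hypothetical table `t` with columns `status` and `ts`:

```sql
-- Both assignments run in one mutation; the WHERE expression must evaluate to UInt8
ALTER TABLE t UPDATE status = 'done', ts = now() WHERE status = 'pending';
```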

View File

@ -18,7 +18,7 @@ public:
void executeImpl(const CommandLineOptions &, DisksClient & client) override
{
auto disk = client.getCurrentDiskWithPath();
const auto & disk = client.getCurrentDiskWithPath();
std::cout << "Disk: " << disk.getDisk()->getName() << "\nPath: " << disk.getCurrentPath() << std::endl;
}
};

View File

@ -20,7 +20,7 @@ public:
void executeImpl(const CommandLineOptions & options, DisksClient & client) override
{
auto disk = client.getCurrentDiskWithPath();
const auto & disk = client.getCurrentDiskWithPath();
const String & path_from = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-from"));
const String & path_to = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-to"));

View File

@ -23,7 +23,7 @@ public:
{
bool recursive = options.count("recursive");
bool show_hidden = options.count("all");
auto disk = client.getCurrentDiskWithPath();
const auto & disk = client.getCurrentDiskWithPath();
String path = getValueFromCommandLineOptionsWithDefault<String>(options, "path", ".");
if (recursive)

View File

@ -21,7 +21,7 @@ public:
void executeImpl(const CommandLineOptions & options, DisksClient & client) override
{
bool recursive = options.count("parents");
auto disk = client.getCurrentDiskWithPath();
const auto & disk = client.getCurrentDiskWithPath();
String path = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path"));

View File

@ -22,7 +22,7 @@ public:
void executeImpl(const CommandLineOptions & options, DisksClient & client) override
{
auto disk = client.getCurrentDiskWithPath();
const auto & disk = client.getCurrentDiskWithPath();
String path_from = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-from"));
std::optional<String> path_to = getValueFromCommandLineOptionsWithOptional<String>(options, "path-to");

View File

@ -25,7 +25,7 @@ public:
void executeImpl(const CommandLineOptions & options, DisksClient & client) override
{
auto disk = client.getCurrentDiskWithPath();
const auto & disk = client.getCurrentDiskWithPath();
const String & path = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path"));
bool recursive = options.count("recursive");
if (disk.getDisk()->existsDirectory(path))

View File

@ -20,7 +20,7 @@ public:
void executeImpl(const CommandLineOptions & options, DisksClient & client) override
{
auto disk = client.getCurrentDiskWithPath();
const auto & disk = client.getCurrentDiskWithPath();
String path = getValueFromCommandLineOptionsThrow<String>(options, "path");
disk.getDisk()->createFile(disk.getRelativeFromRoot(path));

View File

@ -129,7 +129,7 @@ std::vector<String> DisksApp::getCompletions(const String & prefix) const
}
if (arguments.size() == 1)
{
String command_prefix = arguments[0];
const String & command_prefix = arguments[0];
return getCommandsToComplete(command_prefix);
}

View File

@ -243,7 +243,7 @@ enum class FileChangeType : uint8_t
Type,
};
void writeText(FileChangeType type, WriteBuffer & out)
static void writeText(FileChangeType type, WriteBuffer & out)
{
switch (type)
{
@ -299,7 +299,7 @@ enum class LineType : uint8_t
Code,
};
void writeText(LineType type, WriteBuffer & out)
static void writeText(LineType type, WriteBuffer & out)
{
switch (type)
{
@ -429,7 +429,7 @@ using CommitDiff = std::map<std::string /* path */, FileDiff>;
/** Parsing helpers */
void skipUntilWhitespace(ReadBuffer & buf)
static void skipUntilWhitespace(ReadBuffer & buf)
{
while (!buf.eof())
{
@ -444,7 +444,7 @@ void skipUntilWhitespace(ReadBuffer & buf)
}
}
void skipUntilNextLine(ReadBuffer & buf)
static void skipUntilNextLine(ReadBuffer & buf)
{
while (!buf.eof())
{
@ -462,7 +462,7 @@ void skipUntilNextLine(ReadBuffer & buf)
}
}
void readStringUntilNextLine(std::string & s, ReadBuffer & buf)
static void readStringUntilNextLine(std::string & s, ReadBuffer & buf)
{
s.clear();
while (!buf.eof())
@ -680,7 +680,7 @@ using Snapshot = std::map<std::string /* path */, FileBlame>;
* - the author, time and commit of the previous change to every found line (blame).
* And update the snapshot.
*/
void updateSnapshot(Snapshot & snapshot, const Commit & commit, CommitDiff & file_changes)
static void updateSnapshot(Snapshot & snapshot, const Commit & commit, CommitDiff & file_changes)
{
/// Renames and copies.
for (auto & elem : file_changes)
@ -755,7 +755,7 @@ void updateSnapshot(Snapshot & snapshot, const Commit & commit, CommitDiff & fil
*/
using DiffHashes = std::unordered_set<UInt128>;
UInt128 diffHash(const CommitDiff & file_changes)
static UInt128 diffHash(const CommitDiff & file_changes)
{
SipHash hasher;
@ -791,7 +791,7 @@ UInt128 diffHash(const CommitDiff & file_changes)
* :100644 100644 828dedf6b5 828dedf6b5 R100 dbms/src/Functions/GeoUtils.h dbms/src/Functions/PolygonUtils.h
* according to the output of 'git show --raw'
*/
void processFileChanges(
static void processFileChanges(
ReadBuffer & in,
const Options & options,
Commit & commit,
@ -883,7 +883,7 @@ void processFileChanges(
* - we expect some specific format of the diff; but it may actually depend on git config;
* - non-ASCII file names are not processed correctly (they will not be found and will be ignored).
*/
void processDiffs(
static void processDiffs(
ReadBuffer & in,
std::optional<size_t> size_limit,
Commit & commit,
@ -1055,7 +1055,7 @@ void processDiffs(
/** Process the "git show" result for a single commit. Append the result to tables.
*/
void processCommit(
static void processCommit(
ReadBuffer & in,
const Options & options,
size_t commit_num,
@ -1123,7 +1123,7 @@ void processCommit(
/** Runs child process and allows to read the result.
* Multiple processes can be run for parallel processing.
*/
auto gitShow(const std::string & hash)
static auto gitShow(const std::string & hash)
{
std::string command = fmt::format(
"git show --raw --pretty='format:%ct%x00%aN%x00%P%x00%s%x00' --patch --unified=0 {}",
@ -1135,7 +1135,7 @@ auto gitShow(const std::string & hash)
/** Obtain the list of commits and process them.
*/
void processLog(const Options & options)
static void processLog(const Options & options)
{
ResultWriter result;

View File

@ -63,7 +63,7 @@ int printHelp(int, char **)
}
bool isClickhouseApp(std::string_view app_suffix, std::vector<char *> & argv)
static bool isClickhouseApp(std::string_view app_suffix, std::vector<char *> & argv)
{
/// Use app if the first arg 'app' is passed (the arg should be quietly removed)
if (argv.size() >= 2)
@ -132,7 +132,7 @@ __attribute__((constructor(0))) void init_je_malloc_message() { malloc_message =
///
/// extern bool inside_main;
/// class C { C() { assert(inside_main); } };
bool inside_main = false;
static bool inside_main = false;
int main(int argc_, char ** argv_)
{

View File

@ -68,7 +68,7 @@ struct ExternalDictionaryLibraryAPI
using LibrarySettings = CStrings *;
using LibraryData = void *;
using RawClickHouseLibraryTable = void *;
/// Can be safely casted into const Table * with static_cast<const ClickHouseLibrary::Table *>
/// Can be safely cast into const Table * with static_cast<const ClickHouseLibrary::Table *>
using RequestedColumnsNames = CStrings *;
using RequestedIds = const VectorUInt64 *;
using RequestedKeys = Table *;

View File

@ -136,7 +136,7 @@ using ModelPtr = std::unique_ptr<IModel>;
template <typename... Ts>
UInt64 hash(Ts... xs)
static UInt64 hash(Ts... xs)
{
SipHash hash;
(hash.update(xs), ...);
@ -271,7 +271,7 @@ public:
/// Pseudorandom permutation of mantissa.
template <typename Float>
Float transformFloatMantissa(Float x, UInt64 seed)
static Float transformFloatMantissa(Float x, UInt64 seed)
{
using UInt = std::conditional_t<std::is_same_v<Float, Float32>, UInt32, UInt64>;
constexpr size_t mantissa_num_bits = std::is_same_v<Float, Float32> ? 23 : 52;

View File

@ -32,7 +32,7 @@ namespace ErrorCodes
* If test-mode option is added, files will be put by given url via PUT request.
*/
void processFile(const fs::path & file_path, const fs::path & dst_path, bool test_mode, bool link, WriteBuffer & metadata_buf)
static void processFile(const fs::path & file_path, const fs::path & dst_path, bool test_mode, bool link, WriteBuffer & metadata_buf)
{
String remote_path;
RE2::FullMatch(file_path.string(), EXTRACT_PATH_PATTERN, &remote_path);
@ -77,7 +77,7 @@ void processFile(const fs::path & file_path, const fs::path & dst_path, bool tes
}
void processTableFiles(const fs::path & data_path, fs::path dst_path, bool test_mode, bool link)
static void processTableFiles(const fs::path & data_path, fs::path dst_path, bool test_mode, bool link)
{
std::cerr << "Data path: " << data_path << ", destination path: " << dst_path << std::endl;

View File

@ -40,7 +40,7 @@ namespace ErrorCodes
extern const int SYSTEM_ERROR;
}
void setUserAndGroup(std::string arg_uid, std::string arg_gid)
static void setUserAndGroup(std::string arg_uid, std::string arg_gid)
{
static constexpr size_t buf_size = 16384; /// Linux man page says it is enough. Nevertheless, we will check if it's not enough and throw.
std::unique_ptr<char[]> buf(new char[buf_size]);

View File

@ -53,7 +53,7 @@ String serializeAccessEntity(const IAccessEntity & entity)
return buf.str();
}
AccessEntityPtr deserializeAccessEntityImpl(const String & definition)
static AccessEntityPtr deserializeAccessEntityImpl(const String & definition)
{
ASTs queries;
ParserAttachAccessEntity parser;

View File

@ -80,7 +80,7 @@ AuthenticationData::Digest AuthenticationData::Util::encodeBcrypt(std::string_vi
if (ret != 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "BCrypt library failed: bcrypt_gensalt returned {}", ret);
ret = bcrypt_hashpw(text.data(), salt, reinterpret_cast<char *>(hash.data()));
ret = bcrypt_hashpw(text.data(), salt, reinterpret_cast<char *>(hash.data())); /// NOLINT(bugprone-suspicious-stringview-data-usage)
if (ret != 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "BCrypt library failed: bcrypt_hashpw returned {}", ret);
@ -95,7 +95,7 @@ AuthenticationData::Digest AuthenticationData::Util::encodeBcrypt(std::string_vi
bool AuthenticationData::Util::checkPasswordBcrypt(std::string_view password [[maybe_unused]], const Digest & password_bcrypt [[maybe_unused]])
{
#if USE_BCRYPT
int ret = bcrypt_checkpw(password.data(), reinterpret_cast<const char *>(password_bcrypt.data()));
int ret = bcrypt_checkpw(password.data(), reinterpret_cast<const char *>(password_bcrypt.data())); /// NOLINT(bugprone-suspicious-stringview-data-usage)
/// Before 24.6 we didn't validate hashes on creation, so it could be that the stored hash is invalid
/// and it could not be decoded by the library
if (ret == -1)

View File

@ -371,7 +371,7 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur
}
}
UInt128 computeParamsHash(const LDAPClient::Params & params, const LDAPClient::RoleSearchParamsList * role_search_params)
static UInt128 computeParamsHash(const LDAPClient::Params & params, const LDAPClient::RoleSearchParamsList * role_search_params)
{
SipHash hash;
params.updateHash(hash);

View File

@ -36,7 +36,7 @@ void SettingsProfileElement::init(const ASTSettingsProfileElement & ast, const A
if (id_mode)
return parse<UUID>(name_);
assert(access_control);
return access_control->getID<SettingsProfile>(name_);
return access_control->getID<SettingsProfile>(name_); /// NOLINT(clang-analyzer-core.CallAndMessage)
};
if (!ast.parent_profile.empty())

View File

@ -139,7 +139,7 @@ public:
}
};
AggregateFunctionPtr createAggregateFunctionDistinctDynamicTypes(
static AggregateFunctionPtr createAggregateFunctionDistinctDynamicTypes(
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
assertNoParameters(name, parameters);

View File

@ -327,7 +327,7 @@ private:
};
template <typename Data>
AggregateFunctionPtr createAggregateFunctionDistinctJSONPathsAndTypes(
static AggregateFunctionPtr createAggregateFunctionDistinctJSONPathsAndTypes(
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
assertNoParameters(name, parameters);

View File

@ -217,7 +217,7 @@ static void fillColumn(DB::PaddedPODArray<UInt8> & chars, DB::PaddedPODArray<UIn
insertData(chars, offsets, str.data() + start, end - start);
}
void dumpFlameGraph(
static void dumpFlameGraph(
const AggregateFunctionFlameGraphTree::Traces & traces,
DB::PaddedPODArray<UInt8> & chars,
DB::PaddedPODArray<UInt64> & offsets)
@ -630,7 +630,7 @@ static void check(const std::string & name, const DataTypes & argument_types, co
name, argument_types[2]->getName());
}
AggregateFunctionPtr createAggregateFunctionFlameGraph(const std::string & name, const DataTypes & argument_types, const Array & params, const Settings * settings)
static AggregateFunctionPtr createAggregateFunctionFlameGraph(const std::string & name, const DataTypes & argument_types, const Array & params, const Settings * settings)
{
if (!(*settings)[Setting::allow_introspection_functions])
throw Exception(ErrorCodes::FUNCTION_NOT_ALLOWED,

View File

@ -95,7 +95,7 @@ struct GroupArraySamplerData
/// With a large number of values, we will generate random numbers several times slower.
if (lim <= static_cast<UInt64>(pcg32_fast::max()))
return rng() % lim;
return rng() % lim; /// NOLINT(clang-analyzer-core.DivideZero)
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32::max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
}
@ -494,7 +494,7 @@ class GroupArrayGeneralImpl final
{
static constexpr bool limit_num_elems = Trait::has_limit;
using Data = GroupArrayGeneralData<Node, Trait::sampler != Sampler::NONE>;
static Data & data(AggregateDataPtr __restrict place) { return *reinterpret_cast<Data *>(place); }
static Data & data(AggregateDataPtr __restrict place) { return *reinterpret_cast<Data *>(place); } /// NOLINT(readability-non-const-parameter)
static const Data & data(ConstAggregateDataPtr __restrict place) { return *reinterpret_cast<const Data *>(place); }
DataTypePtr & data_type;

View File

@ -384,7 +384,7 @@ public:
auto * column = typeid_cast<ColumnFloat64 *>(&to);
if (!column)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cast of column of predictions is incorrect. "
"getReturnTypeToPredict must return same value as it is casted to");
"getReturnTypeToPredict must return same value as it is cast to");
this->data(place).predict(column->getData(), arguments, offset, limit, context);
}

View File

@ -179,7 +179,7 @@ class SequenceNextNodeImpl final
using Self = SequenceNextNodeImpl<T, Node>;
using Data = SequenceNextNodeGeneralData<Node>;
static Data & data(AggregateDataPtr __restrict place) { return *reinterpret_cast<Data *>(place); }
static Data & data(AggregateDataPtr __restrict place) { return *reinterpret_cast<Data *>(place); } /// NOLINT(readability-non-const-parameter)
static const Data & data(ConstAggregateDataPtr __restrict place) { return *reinterpret_cast<const Data *>(place); }
static constexpr size_t base_cond_column_idx = 2;

View File

@ -694,7 +694,7 @@ class IAggregateFunctionDataHelper : public IAggregateFunctionHelper<Derived>
protected:
using Data = T;
static Data & data(AggregateDataPtr __restrict place) { return *reinterpret_cast<Data *>(place); }
static Data & data(AggregateDataPtr __restrict place) { return *reinterpret_cast<Data *>(place); } /// NOLINT(readability-non-const-parameter)
static const Data & data(ConstAggregateDataPtr __restrict place) { return *reinterpret_cast<const Data *>(place); }
public:

View File

@ -259,7 +259,7 @@ private:
/// With a large number of values, we will generate random numbers several times slower.
if (limit <= static_cast<UInt64>(pcg32_fast::max()))
return rng() % limit;
return rng() % limit; /// NOLINT(clang-analyzer-core.DivideZero)
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32_fast::max()) + 1ULL) + static_cast<UInt64>(rng())) % limit;
}

View File

@ -174,7 +174,7 @@ struct IdentifierResolveScope
size_t found_nullable_group_by_key_in_scope = 0;
/** It's possible that after a JOIN, a column in the projection has a type different from the column in the source table.
* (For example, after join_use_nulls or USING column casted to supertype)
* (For example, after join_use_nulls or USING column cast to supertype)
* However, the column in the projection still refers to the table as its source.
* This map is used to revert these columns back to their original columns in the source table.
*/

View File

@ -100,13 +100,13 @@ replxx::Replxx::completions_t LineReader::Suggest::getCompletions(const String &
range = std::equal_range(
to_search.begin(), to_search.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched)
{
return strncasecmp(s.data(), prefix_searched.data(), prefix_length) < 0;
return strncasecmp(s.data(), prefix_searched.data(), prefix_length) < 0; /// NOLINT(bugprone-suspicious-stringview-data-usage)
});
else
range = std::equal_range(
to_search.begin(), to_search.end(), last_word, [prefix_length](std::string_view s, std::string_view prefix_searched)
{
return strncmp(s.data(), prefix_searched.data(), prefix_length) < 0;
return strncmp(s.data(), prefix_searched.data(), prefix_length) < 0; /// NOLINT(bugprone-suspicious-stringview-data-usage)
});
return replxx::Replxx::completions_t(range.first, range.second);

View File

@ -121,7 +121,7 @@ void TerminalKeystrokeInterceptor::runImpl(const DB::TerminalKeystrokeIntercepto
if (available <= 0)
return;
if (read(fd, &ch, 1) > 0)
if (read(fd, &ch, 1) > 0) /// NOLINT(clang-analyzer-unix.BlockInCriticalSection)
{
auto it = map.find(ch);
if (it != map.end())

View File

@ -319,6 +319,8 @@ public:
variant_column_ptr = assert_cast<ColumnVariant *>(variant_column.get());
}
void forEachSubcolumn(ColumnCallback callback) const override { callback(variant_column); }
void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
{
callback(*variant_column);

View File

@ -440,7 +440,7 @@ bool ColumnObject::tryInsert(const Field & x)
column->popBack(column->size() - prev_size);
}
if (shared_data_paths->size() != prev_paths_size)
if (shared_data_paths->size() != prev_paths_size) /// NOLINT(clang-analyzer-core.NullDereference)
shared_data_paths->popBack(shared_data_paths->size() - prev_paths_size);
if (shared_data_values->size() != prev_values_size)
shared_data_values->popBack(shared_data_values->size() - prev_values_size);

View File

@ -392,14 +392,14 @@ void ColumnObjectDeprecated::Subcolumn::insertRangeFrom(const Subcolumn & src, s
if (n * 3 >= column->size())
{
auto casted_column = castColumn({column, column_type, ""}, least_common_type.get());
data.back()->insertRangeFrom(*casted_column, from, n);
auto cast_column = castColumn({column, column_type, ""}, least_common_type.get());
data.back()->insertRangeFrom(*cast_column, from, n);
return;
}
auto casted_column = column->cut(from, n);
casted_column = castColumn({casted_column, column_type, ""}, least_common_type.get());
data.back()->insertRangeFrom(*casted_column, 0, n);
auto cast_column = column->cut(from, n);
cast_column = castColumn({cast_column, column_type, ""}, least_common_type.get());
data.back()->insertRangeFrom(*cast_column, 0, n);
};
size_t pos = 0;

View File

@ -156,7 +156,7 @@ public:
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method get64 is not supported for {}", getName());
}
/// If column stores native numeric type, it returns n-th element casted to Float64
/// If column stores native numeric type, it returns n-th element cast to Float64
/// Is used in regression methods to cast each feature into a uniform type
[[nodiscard]] virtual Float64 getFloat64(size_t /*n*/) const
{
@ -168,7 +168,7 @@ public:
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getFloat32 is not supported for {}", getName());
}
/** If column is numeric, return value of n-th element, casted to UInt64.
/** If column is numeric, return value of n-th element, cast to UInt64.
* For NULL values of Nullable column it is allowed to return arbitrary value.
* Otherwise throw an exception.
*/
@ -185,7 +185,7 @@ public:
[[nodiscard]] virtual bool isDefaultAt(size_t n) const = 0;
[[nodiscard]] virtual bool isNullAt(size_t /*n*/) const { return false; }
/** If column is numeric, return value of n-th element, casted to bool.
/** If column is numeric, return value of n-th element, cast to bool.
* For NULL values of Nullable column returns false.
* Otherwise throw an exception.
*/
@ -815,6 +815,15 @@ bool isColumnNullableOrLowCardinalityNullable(const IColumn & column);
template <typename Derived, typename Parent = IColumn>
class IColumnHelper : public Parent
{
private:
using Self = IColumnHelper<Derived, Parent>;
friend Derived;
friend class COWHelper<Self, Derived>;
IColumnHelper() = default;
IColumnHelper(const IColumnHelper &) = default;
/// Devirtualize insertFrom.
MutableColumns scatter(IColumn::ColumnIndex num_columns, const IColumn::Selector & selector) const override;

View File

@ -24,7 +24,7 @@ void expandDataByMask(PaddedPODArray<T> & data, const PaddedPODArray<UInt8> & ma
ssize_t from = data.size() - 1;
ssize_t index = mask.size() - 1;
data.resize_exact(mask.size());
data.resize(mask.size());
while (index >= 0)
{
if (!!mask[index] ^ inverted)

View File

@ -13,6 +13,7 @@ TEST(IColumn, dumpStructure)
String expected_structure = "LowCardinality(size = 0, UInt8(size = 0), Unique(size = 1, String(size = 1)))";
std::vector<std::thread> threads;
threads.reserve(6);
for (size_t i = 0; i < 6; ++i)
{
threads.emplace_back([&]

View File

@ -724,14 +724,14 @@ void AsyncLoader::enqueue(Info & info, const LoadJobPtr & job, std::unique_lock<
// (when high-priority job A function waits for a lower-priority job B, and B never starts due to its priority)
// 4) Resolve "blocked pool" deadlocks -- spawn more workers
// (when job A in pool P waits for another ready job B in P, but B never starts because there are no free workers in P)
thread_local LoadJob * current_load_job = nullptr;
static thread_local LoadJob * current_load_job = nullptr;
size_t currentPoolOr(size_t pool)
{
return current_load_job ? current_load_job->executionPool() : pool;
}
bool detectWaitDependentDeadlock(const LoadJobPtr & waited)
static bool detectWaitDependentDeadlock(const LoadJobPtr & waited)
{
if (waited.get() == current_load_job)
return true;

View File

@ -75,10 +75,15 @@
template <typename Derived>
class COW : public boost::intrusive_ref_counter<Derived>
{
friend Derived;
private:
Derived * derived() { return static_cast<Derived *>(this); }
const Derived * derived() const { return static_cast<const Derived *>(this); }
COW() = default;
COW(const COW&) = default;
protected:
template <typename T>
class mutable_ptr : public boost::intrusive_ptr<T> /// NOLINT
@ -271,10 +276,15 @@ public:
template <typename Base, typename Derived>
class COWHelper : public Base
{
friend Derived;
private:
Derived * derived() { return static_cast<Derived *>(this); }
const Derived * derived() const { return static_cast<const Derived *>(this); }
COWHelper() = default;
COWHelper(const COWHelper &) = default;
public:
using Ptr = typename Base::template immutable_ptr<Derived>;
using MutablePtr = typename Base::template mutable_ptr<Derived>;

View File

@ -161,7 +161,7 @@ public:
FindResultImpl(Mapped * value_, bool found_, size_t off)
: FindResultImplBase(found_), FindResultImplOffsetBase<need_offset>(off), value(value_) {}
Mapped & getMapped() const { return *value; }
Mapped & getMapped() const { return *value; } /// NOLINT(clang-analyzer-core.uninitialized.UndefReturn)
};
template <bool need_offset>

View File

@ -377,7 +377,7 @@ String DNSResolver::getHostName()
return *impl->host_name;
}
static const String & cacheElemToString(const String & str) { return str; }
static String cacheElemToString(String str) { return str; }
static String cacheElemToString(const Poco::Net::IPAddress & addr) { return addr.toString(); }
template <typename UpdateF, typename ElemsT>

View File

@ -22,7 +22,7 @@ namespace ErrorCodes
}
/// Embedded timezones.
std::string_view getTimeZone(const char * name);
std::string_view getTimeZone(const char * name); /// NOLINT(misc-use-internal-linkage)
namespace

View File

@ -256,7 +256,7 @@ uint64_t readOffset(std::string_view & sp, bool is64_bit)
std::string_view readBytes(std::string_view & sp, uint64_t len)
{
SAFE_CHECK(len <= sp.size(), "invalid string length: {} vs. {}", len, sp.size());
std::string_view ret(sp.data(), len);
std::string_view ret(sp.data(), len); /// NOLINT(bugprone-suspicious-stringview-data-usage)
sp.remove_prefix(len);
return ret;
}
@ -266,7 +266,7 @@ std::string_view readNullTerminated(std::string_view & sp)
{
const char * p = static_cast<const char *>(memchr(sp.data(), 0, sp.size()));
SAFE_CHECK(p, "invalid null-terminated string");
std::string_view ret(sp.data(), p - sp.data());
std::string_view ret(sp.data(), p - sp.data()); /// NOLINT(bugprone-suspicious-stringview-data-usage)
sp = std::string_view(p + 1, sp.size());
return ret;
}
@ -442,7 +442,7 @@ bool Dwarf::Section::next(std::string_view & chunk)
is64_bit = (initial_length == uint32_t(-1));
auto length = is64_bit ? read<uint64_t>(chunk) : initial_length;
SAFE_CHECK(length <= chunk.size(), "invalid DWARF section");
chunk = std::string_view(chunk.data(), length);
chunk = std::string_view(chunk.data(), length); /// NOLINT(bugprone-suspicious-stringview-data-usage)
data = std::string_view(chunk.end(), data.end() - chunk.end());
return true;
}
@ -937,7 +937,7 @@ bool Dwarf::findDebugInfoOffset(uintptr_t address, std::string_view aranges, uin
// Padded to a multiple of 2 addresses.
// Strangely enough, this is the only place in the DWARF spec that requires
// padding.
skipPadding(chunk, aranges.data(), 2 * sizeof(uintptr_t));
skipPadding(chunk, aranges.data(), 2 * sizeof(uintptr_t)); /// NOLINT(bugprone-suspicious-stringview-data-usage)
for (;;)
{
auto start = read<uintptr_t>(chunk);
@ -1681,7 +1681,7 @@ struct LineNumberAttribute
std::variant<uint64_t, std::string_view> attr_value;
};
LineNumberAttribute readLineNumberAttribute(
static LineNumberAttribute readLineNumberAttribute(
bool is64_bit, std::string_view & format, std::string_view & entries, std::string_view debugStr, std::string_view debugLineStr)
{
uint64_t content_type_code = readULEB(format);
@ -1817,7 +1817,7 @@ void Dwarf::LineNumberVM::init()
}
uint64_t header_length = readOffset(data_, is64Bit_);
SAFE_CHECK(header_length <= data_.size(), "invalid line number VM header length");
std::string_view header(data_.data(), header_length);
std::string_view header(data_.data(), header_length); /// NOLINT(bugprone-suspicious-stringview-data-usage)
data_ = std::string_view(header.end(), data_.end() - header.end());
minLength_ = read<uint8_t>(header);
@ -1846,7 +1846,7 @@ void Dwarf::LineNumberVM::init()
{
++v4_.includeDirectoryCount;
}
v4_.includeDirectories = {tmp, header.data()};
v4_.includeDirectories = {tmp, header.data()}; /// NOLINT(bugprone-suspicious-stringview-data-usage)
tmp = header.data();
FileName fn;
@ -1855,7 +1855,7 @@ void Dwarf::LineNumberVM::init()
{
++v4_.fileNameCount;
}
v4_.fileNames = {tmp, header.data()};
v4_.fileNames = {tmp, header.data()}; /// NOLINT(bugprone-suspicious-stringview-data-usage)
}
else if (version_ == 5)
{
@ -1868,7 +1868,7 @@ void Dwarf::LineNumberVM::init()
readULEB(header); // A content type code
readULEB(header); // A form code using the attribute form codes
}
v5_.directoryEntryFormat = {tmp, header.data()};
v5_.directoryEntryFormat = {tmp, header.data()}; /// NOLINT(bugprone-suspicious-stringview-data-usage)
v5_.directoriesCount = readULEB(header);
tmp = header.data();
for (uint64_t i = 0; i < v5_.directoriesCount; i++)
@ -1879,7 +1879,7 @@ void Dwarf::LineNumberVM::init()
readLineNumberAttribute(is64Bit_, format, header, debugStr_, debugLineStr_);
}
}
v5_.directories = {tmp, header.data()};
v5_.directories = {tmp, header.data()}; /// NOLINT(bugprone-suspicious-stringview-data-usage)
v5_.fileNameEntryFormatCount = read<uint8_t>(header);
tmp = header.data();
@ -1890,7 +1890,7 @@ void Dwarf::LineNumberVM::init()
readULEB(header); // A content type code
readULEB(header); // A form code using the attribute form codes
}
v5_.fileNameEntryFormat = {tmp, header.data()};
v5_.fileNameEntryFormat = {tmp, header.data()}; /// NOLINT(bugprone-suspicious-stringview-data-usage)
v5_.fileNamesCount = readULEB(header);
tmp = header.data();
for (uint64_t i = 0; i < v5_.fileNamesCount; i++)
@ -1901,7 +1901,7 @@ void Dwarf::LineNumberVM::init()
readLineNumberAttribute(is64Bit_, format, header, debugStr_, debugLineStr_);
}
}
v5_.fileNames = {tmp, header.data()};
v5_.fileNames = {tmp, header.data()}; /// NOLINT(bugprone-suspicious-stringview-data-usage)
}
}

View File

@ -650,7 +650,7 @@ namespace ErrorCodes
APPLY_FOR_ERROR_CODES(M)
#undef M
}
} error_codes_names;
} static error_codes_names;
std::string_view getName(ErrorCode error_code)
{

View File

@ -61,7 +61,7 @@ std::function<void(const std::string & msg, int code, bool remote, const Excepti
/// - Aborts the process if error code is LOGICAL_ERROR.
/// - Increments error codes statistics.
void handle_error_code(const std::string & msg, int code, bool remote, const Exception::FramePointers & trace)
static void handle_error_code(const std::string & msg, int code, bool remote, const Exception::FramePointers & trace)
{
// In debug builds and builds with sanitizers, treat LOGICAL_ERROR as an assertion failure.
// Log the message before we fail.

View File

@ -94,7 +94,7 @@ namespace ErrorCodes
}
IHTTPConnectionPoolForEndpoint::Metrics getMetricsForStorageConnectionPool()
static IHTTPConnectionPoolForEndpoint::Metrics getMetricsForStorageConnectionPool()
{
return IHTTPConnectionPoolForEndpoint::Metrics{
.created = ProfileEvents::StorageConnectionsCreated,
@ -110,7 +110,7 @@ IHTTPConnectionPoolForEndpoint::Metrics getMetricsForStorageConnectionPool()
}
IHTTPConnectionPoolForEndpoint::Metrics getMetricsForDiskConnectionPool()
static IHTTPConnectionPoolForEndpoint::Metrics getMetricsForDiskConnectionPool()
{
return IHTTPConnectionPoolForEndpoint::Metrics{
.created = ProfileEvents::DiskConnectionsCreated,
@ -126,7 +126,7 @@ IHTTPConnectionPoolForEndpoint::Metrics getMetricsForDiskConnectionPool()
}
IHTTPConnectionPoolForEndpoint::Metrics getMetricsForHTTPConnectionPool()
static IHTTPConnectionPoolForEndpoint::Metrics getMetricsForHTTPConnectionPool()
{
return IHTTPConnectionPoolForEndpoint::Metrics{
.created = ProfileEvents::HTTPConnectionsCreated,
@ -142,7 +142,7 @@ IHTTPConnectionPoolForEndpoint::Metrics getMetricsForHTTPConnectionPool()
}
IHTTPConnectionPoolForEndpoint::Metrics getConnectionPoolMetrics(HTTPConnectionGroupType type)
static IHTTPConnectionPoolForEndpoint::Metrics getConnectionPoolMetrics(HTTPConnectionGroupType type)
{
switch (type)
{
@ -779,7 +779,7 @@ struct Hasher
}
};
IExtendedPool::Ptr
static IExtendedPool::Ptr
createConnectionPool(ConnectionGroup::Ptr group, std::string host, UInt16 port, bool secure, ProxyConfiguration proxy_configuration)
{
if (secure)

View File

@ -172,7 +172,7 @@ struct HashTableCell
const value_type & getValue() const { return key; }
/// Get the key (internally).
static const Key & getKey(const value_type & value) { return value; }
static const Key & getKey(const value_type & value) { return value; } /// NOLINT(bugprone-return-const-ref-from-parameter)
/// Are the keys at the cells equal?
bool keyEquals(const Key & key_) const { return bitEquals(key, key_); }

Some files were not shown because too many files have changed in this diff.