Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-27 18:12:02 +00:00)

commit 8534e749f8: Merge branch 'master' of github.com:ClickHouse/ClickHouse into fix-memory-leak-nullkey-distinct
.github/actions/check_workflow/action.yml (vendored, new file, +21)
@@ -0,0 +1,21 @@
+name: CheckWorkflowResults
+
+description: Check overall workflow status and post error to slack if any
+
+inputs:
+  needs:
+    description: github needs context as a json string
+    required: true
+    type: string
+
+runs:
+  using: "composite"
+  steps:
+    - name: Check Workflow
+      shell: bash
+      run: |
+        export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+        cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+        ${{ inputs.needs }}
+        EOF
+        python3 ./tests/ci/ci_buddy.py --check-wf-status
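A minimal sketch of how a workflow job would consume this composite action (the pull_request.yml hunk further down does exactly this); the step shown here is illustrative, not part of the diff:

      # hypothetical caller step; only the uses/with keys matter
      - name: Check Workflow results
        uses: ./.github/actions/check_workflow
        with:
          needs: ${{ toJson(needs) }}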
.github/actions/release/action.yml (vendored, deleted file, -168)
@@ -1,168 +0,0 @@
-name: Release
-
-description: Makes patch releases and creates new release branch
-
-inputs:
-  ref:
-    description: 'Git reference (branch or commit sha) from which to create the release'
-    required: true
-    type: string
-  type:
-    description: 'The type of release: "new" for a new release or "patch" for a patch release'
-    required: true
-    type: choice
-    options:
-      - patch
-      - new
-  dry-run:
-    description: 'Dry run'
-    required: false
-    default: true
-    type: boolean
-  token:
-    required: true
-    type: string
-
-runs:
-  using: "composite"
-  steps:
-    - name: Prepare Release Info
-      shell: bash
-      run: |
-        python3 ./tests/ci/create_release.py --prepare-release-info \
-          --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
-          ${{ inputs.dry-run && '--dry-run' || '' }}
-        echo "::group::Release Info"
-        python3 -m json.tool /tmp/release_info.json
-        echo "::endgroup::"
-        release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
-        commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
-        echo "Release Tag: $release_tag"
-        echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
-        echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
-    - name: Download All Release Artifacts
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Push Git Tag for the Release
-      shell: bash
-      run: |
-        python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Push New Release Branch
-      if: ${{ inputs.type == 'new' }}
-      shell: bash
-      run: |
-        python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Bump CH Version and Update Contributors' List
-      shell: bash
-      run: |
-        python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Bump Docker versions, Changelog, Security
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        git checkout master
-        python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
-        echo "List versions"
-        ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
-        echo "Update docker version"
-        ./utils/list-versions/update-docker-version.sh
-        echo "Generate ChangeLog"
-        export CI=1
-        docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
-          --volume=".:/ClickHouse" clickhouse/style-test \
-          /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
-          --gh-user-or-token=${{ inputs.token }} --jobs=5 \
-          --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
-        git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
-        echo "Generate Security"
-        python3 ./utils/security-generator/generate_security.py > SECURITY.md
-        git diff HEAD
-    - name: Create ChangeLog PR
-      if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
-      uses: peter-evans/create-pull-request@v6
-      with:
-        author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
-        token: ${{ inputs.token }}
-        committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
-        commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
-        branch: auto/${{ env.RELEASE_TAG }}
-        assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
-        delete-branch: true
-        title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
-        labels: do not test
-        body: |
-          Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
-          ### Changelog category (leave one):
-          - Not for changelog (changelog entry is not required)
-    - name: Complete previous steps and Restore git state
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        python3 ./tests/ci/create_release.py --set-progress-completed
-        git reset --hard HEAD
-        git checkout "$GITHUB_REF_NAME"
-    - name: Create GH Release
-      shell: bash
-      if: ${{ inputs.type == 'patch' }}
-      run: |
-        python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Export TGZ Packages
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Test TGZ Packages
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Export RPM Packages
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Test RPM Packages
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Export Debian Packages
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Test Debian Packages
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }}
-    - name: Docker clickhouse/clickhouse-server building
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        cd "./tests/ci"
-        python3 ./create_release.py --set-progress-started --progress "docker server release"
-        export CHECK_NAME="Docker server image"
-        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
-        python3 ./create_release.py --set-progress-completed
-    - name: Docker clickhouse/clickhouse-keeper building
-      if: ${{ inputs.type == 'patch' }}
-      shell: bash
-      run: |
-        cd "./tests/ci"
-        python3 ./create_release.py --set-progress-started --progress "docker keeper release"
-        export CHECK_NAME="Docker keeper image"
-        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
-        python3 ./create_release.py --set-progress-completed
-    - name: Set current Release progress to Completed with OK
-      shell: bash
-      run: |
-        python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
-        python3 ./tests/ci/create_release.py --set-progress-completed
-    - name: Post Slack Message
-      if: ${{ !cancelled() }}
-      shell: bash
-      run: |
-        python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }}
.github/workflows/auto_release.yml (vendored, deleted file, -111)
@@ -1,111 +0,0 @@
-name: AutoRelease
-
-env:
-  PYTHONUNBUFFERED: 1
-  DRY_RUN: true
-
-concurrency:
-  group: release
-on:  # yamllint disable-line rule:truthy
-  # Workflow uses a test bucket for packages and dry run mode (no real releases)
-  schedule:
-    - cron: '0 9 * * *'
-    - cron: '0 15 * * *'
-  workflow_dispatch:
-    inputs:
-      dry-run:
-        description: 'Dry run'
-        required: false
-        default: true
-        type: boolean
-
-jobs:
-  AutoRelease:
-    runs-on: [self-hosted, release-maker]
-    steps:
-      - name: DebugInfo
-        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
-          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
-          RCSK
-          EOF
-      - name: Set DRY_RUN for schedule
-        if: ${{ github.event_name == 'schedule' }}
-        run: echo "DRY_RUN=true" >> "$GITHUB_ENV"
-      - name: Set DRY_RUN for dispatch
-        if: ${{ github.event_name == 'workflow_dispatch' }}
-        run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV"
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
-          fetch-depth: 0
-      - name: Auto Release Prepare
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 auto_release.py --prepare
-          echo "::group::Auto Release Info"
-          python3 -m json.tool /tmp/autorelease_info.json
-          echo "::endgroup::"
-          {
-            echo 'AUTO_RELEASE_PARAMS<<EOF'
-            cat /tmp/autorelease_info.json
-            echo 'EOF'
-          } >> "$GITHUB_ENV"
-      - name: Post Release Branch statuses
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 auto_release.py --post-status
-      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }}
-        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }}
-        uses: ./.github/actions/release
-        with:
-          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }}
-          type: patch
-          dry-run: ${{ env.DRY_RUN }}
-          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
-      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }}
-        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }}
-        uses: ./.github/actions/release
-        with:
-          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }}
-          type: patch
-          dry-run: ${{ env.DRY_RUN }}
-          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
-      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }}
-        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }}
-        uses: ./.github/actions/release
-        with:
-          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }}
-          type: patch
-          dry-run: ${{ env.DRY_RUN }}
-          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
-      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }}
-        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }}
-        uses: ./.github/actions/release
-        with:
-          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }}
-          type: patch
-          dry-run: ${{ env.DRY_RUN }}
-          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
-      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }}
-        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }}
-        uses: ./.github/actions/release
-        with:
-          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }}
-          type: patch
-          dry-run: ${{ env.DRY_RUN }}
-          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
-      - name: Post Slack Message
-        if: ${{ !cancelled() }}
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
-      - name: Clean up
-        run: |
-          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-          sudo rm -fr "$TEMP_PATH"
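The deleted workflow leaned twice on the multiline-value syntax for `$GITHUB_ENV` (`NAME<<DELIMITER ... DELIMITER`), once for the SSH key and once for `AUTO_RELEASE_PARAMS`. A standalone sketch of the pattern, with an illustrative variable name and payload file:

    # hypothetical example: everything between the delimiters becomes the value of MY_PARAMS
    {
      echo 'MY_PARAMS<<EOF'           # the delimiter must not occur inside the value
      cat /tmp/some_payload.json      # multiline content, e.g. a JSON document
      echo 'EOF'
    } >> "$GITHUB_ENV"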
.github/workflows/create_release.yml (vendored, 172 changed lines)
@@ -16,10 +16,15 @@ concurrency:
         options:
           - patch
           - new
+      only-repo:
+        description: 'Run only repos updates including docker (repo-recovery, tests)'
+        required: false
+        default: false
+        type: boolean
       dry-run:
         description: 'Dry run'
         required: false
-        default: true
+        default: false
         type: boolean

 jobs:
@@ -35,10 +40,163 @@ jobs:
         with:
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
-      - name: Call Release Action
-        uses: ./.github/actions/release
+      - name: Prepare Release Info
+        shell: bash
+        run: |
+          if [ ${{ inputs.only-repo }} == "true" ]; then
+            git tag -l ${{ inputs.ref }} || { echo "With only-repo option ref must be a valid release tag"; exit 1; }
+          fi
+          python3 ./tests/ci/create_release.py --prepare-release-info \
+            --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
+            ${{ inputs.dry-run == true && '--dry-run' || '' }} \
+            ${{ inputs.only-repo == true && '--skip-tag-check' || '' }}
+          echo "::group::Release Info"
+          python3 -m json.tool /tmp/release_info.json
+          echo "::endgroup::"
+          release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
+          commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
+          is_latest=$(jq -r '.latest' /tmp/release_info.json)
+          echo "Release Tag: $release_tag"
+          echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
+          echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
+          if [ "$is_latest" == "true" ]; then
+            echo "DOCKER_TAG_TYPE=release-latest" >> "$GITHUB_ENV"
+          else
+            echo "DOCKER_TAG_TYPE=release" >> "$GITHUB_ENV"
+          fi
+      - name: Download All Release Artifacts
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Push Git Tag for the Release
+        if: ${{ ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Push New Release Branch
+        if: ${{ inputs.type == 'new' && ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Bump CH Version and Update Contributors' List
+        if: ${{ ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Bump Docker versions, Changelog, Security
+        if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
+
+          git checkout master  # in case WF started from feature branch
+          echo "List versions"
+          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
+          echo "Update docker version"
+          ./utils/list-versions/update-docker-version.sh
+          echo "Generate ChangeLog"
+          export CI=1
+          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
+            --volume=".:/wd" --workdir="/wd" \
+            clickhouse/style-test \
+            ./tests/ci/changelog.py -v --debug-helpers \
+            --jobs=5 \
+            --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
+          git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
+          echo "Generate Security"
+          python3 ./utils/security-generator/generate_security.py > SECURITY.md
+          git diff HEAD
+      - name: Create ChangeLog PR
+        if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo }}
+        uses: peter-evans/create-pull-request@v6
         with:
-          ref: ${{ inputs.ref }}
-          type: ${{ inputs.type }}
-          dry-run: ${{ inputs.dry-run }}
-          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+          token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
+          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+          commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
+          branch: auto/${{ env.RELEASE_TAG }}
+          base: master
+          assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
+          delete-branch: true
+          title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
+          labels: do not test
+          body: |
+            Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
+            ### Changelog category (leave one):
+            - Not for changelog (changelog entry is not required)
+      - name: Complete previous steps and Restore git state
+        if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --set-progress-completed
+          git reset --hard HEAD
+          git checkout "$GITHUB_REF_NAME"
+      - name: Create GH Release
+        if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Export TGZ Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Test TGZ Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Export RPM Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Test RPM Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Export Debian Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Test Debian Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Docker clickhouse/clickhouse-server building
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          cd "./tests/ci"
+          python3 ./create_release.py --set-progress-started --progress "docker server release"
+          export CHECK_NAME="Docker server image"
+          python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+          python3 ./create_release.py --set-progress-completed
+      - name: Docker clickhouse/clickhouse-keeper building
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          cd "./tests/ci"
+          python3 ./create_release.py --set-progress-started --progress "docker keeper release"
+          export CHECK_NAME="Docker keeper image"
+          python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+          python3 ./create_release.py --set-progress-completed
+      - name: Update release info. Merge created PRs
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --merge-prs ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Set current Release progress to Completed with OK
+        shell: bash
+        run: |
+          # dummy stage to finalize release info with "progress: completed; status: OK"
+          python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
+          python3 ./tests/ci/create_release.py --set-progress-completed
+      - name: Post Slack Message
+        if: ${{ !cancelled() }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run == true && '--dry-run' || '' }}
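A detail worth noting in the inlined steps: optional CLI flags are now toggled with `${{ inputs.dry-run == true && '--dry-run' || '' }}` rather than the bare `${{ inputs.dry-run && ... }}` used by the deleted composite action. GitHub expressions evaluate `&&`/`||` the way JavaScript does, and any non-empty string is truthy, so a string-typed input holding "false" would still emit the flag; the explicit `== true` comparison against the real boolean avoids that. A hypothetical step showing the pattern (step, input, and tool names are illustrative, not from the diff):

      - name: Run with optional flag
        shell: bash
        run: |
          # emits --verbose only when the boolean input is actually true
          some_tool ${{ inputs.verbose == true && '--verbose' || '' }}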
.github/workflows/master.yml (vendored, 21 changed lines)
@@ -93,21 +93,21 @@ jobs:
     with:
       stage: Builds_2
       data: ${{ needs.RunConfig.outputs.data }}
-  Tests_2:
+  Tests_2_ww:
     needs: [RunConfig, Builds_2]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }}
+    uses: ./.github/workflows/reusable_test_stage.yml
+    with:
+      stage: Tests_2_ww
+      data: ${{ needs.RunConfig.outputs.data }}
+  Tests_2:
+    # Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there.
+    needs: [RunConfig, Builds_1]
     if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
     uses: ./.github/workflows/reusable_test_stage.yml
     with:
       stage: Tests_2
       data: ${{ needs.RunConfig.outputs.data }}
-  Tests_3:
-    # Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there.
-    needs: [RunConfig, Builds_1]
-    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
-    uses: ./.github/workflows/reusable_test_stage.yml
-    with:
-      stage: Tests_3
-      data: ${{ needs.RunConfig.outputs.data }}

 ################################# Reports #################################
 # Reports should run even if Builds_1/2 fail - run them separately, not in Tests_1/2/3
@@ -123,7 +123,7 @@ jobs:

   FinishCheck:
     if: ${{ !cancelled() }}
-    needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
+    needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -133,6 +133,7 @@ jobs:
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
       - name: Check Workflow results
+        if: ${{ !cancelled() }}
        run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
           cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
.github/workflows/pull_request.yml (vendored, 38 changed lines)
@@ -123,27 +123,32 @@ jobs:
       stage: Builds_2
       data: ${{ needs.RunConfig.outputs.data }}
   # stage for running non-required checks without being blocked by required checks (Test_1) if corresponding settings is selected
-  Tests_2:
+  Tests_2_ww:
     needs: [RunConfig, Builds_1]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }}
+    uses: ./.github/workflows/reusable_test_stage.yml
+    with:
+      stage: Tests_2_ww
+      data: ${{ needs.RunConfig.outputs.data }}
+  Tests_2:
+    needs: [RunConfig, Builds_1, Tests_1]
     if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
     uses: ./.github/workflows/reusable_test_stage.yml
     with:
       stage: Tests_2
       data: ${{ needs.RunConfig.outputs.data }}
-  Tests_3:
-    needs: [RunConfig, Builds_1, Tests_1]
-    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
-    uses: ./.github/workflows/reusable_test_stage.yml
-    with:
-      stage: Tests_3
-      data: ${{ needs.RunConfig.outputs.data }}

 ################################# Reports #################################
 # Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3)
   Builds_Report:
     # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
-    needs: [RunConfig, StyleCheck, Builds_1, Builds_2]
+    if: ${{ !cancelled()
+      && needs.RunConfig.result == 'success'
+      && needs.StyleCheck.result != 'failure'
+      && needs.FastTest.result != 'failure'
+      && needs.BuildDockers.result != 'failure'
+      && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2]
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Builds
@@ -154,7 +159,7 @@ jobs:
     if: ${{ !cancelled() }}
     # Test_2 or Test_3 do not have the jobs required for Mergeable check,
     # however, set them as "needs" to get all checks results before the automatic merge occurs.
-    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -167,18 +172,15 @@ jobs:
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
       - name: Check Workflow results
-        run: |
-          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
-          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          python3 ./tests/ci/ci_buddy.py --check-wf-status
+        uses: ./.github/actions/check_workflow
+        with:
+          needs: ${{ toJson(needs) }}

 ################################# Stage Final #################################
 #
   FinishCheck:
     if: ${{ !failure() && !cancelled() }}
-    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
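For reference, the payload this step hands to the action is just GitHub's `needs` context serialized with `toJson`, roughly of this shape (an illustrative sketch with trimmed outputs, not actual CI output):

    {
      "RunConfig": { "result": "success", "outputs": { "data": "..." } },
      "Builds_1": { "result": "success", "outputs": {} },
      "Tests_2_ww": { "result": "failure", "outputs": {} }
    }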
.github/workflows/release.yml (vendored, deleted file, -69)
@@ -1,69 +0,0 @@
-name: PublishedReleaseCI
-# - Gets artifacts from S3
-# - Sends it to JFROG Artifactory
-# - Adds them to the release assets
-
-on:  # yamllint disable-line rule:truthy
-  release:
-    types:
-      - published
-  workflow_dispatch:
-    inputs:
-      tag:
-        description: 'Release tag'
-        required: true
-        type: string
-
-jobs:
-  ReleasePublish:
-    runs-on: [self-hosted, style-checker]
-    steps:
-      - name: Set tag from input
-        if: github.event_name == 'workflow_dispatch'
-        run: |
-          echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
-      - name: Set tag from REF
-        if: github.event_name == 'release'
-        run: |
-          echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
-      - name: Deploy packages and assets
-        run: |
-          curl --silent --data '' --no-buffer \
-            '${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true'
-  ############################################################################################
-  ##################################### Docker images #######################################
-  ############################################################################################
-  DockerServerImages:
-    runs-on: [self-hosted, style-checker]
-    steps:
-      - name: Set tag from input
-        if: github.event_name == 'workflow_dispatch'
-        run: |
-          echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
-      - name: Set tag from REF
-        if: github.event_name == 'release'
-        run: |
-          echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
-          fetch-depth: 0  # otherwise we will have no version info
-          filter: tree:0
-          ref: ${{ env.GITHUB_TAG }}
-      - name: Check docker clickhouse/clickhouse-server building
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          export CHECK_NAME="Docker server image"
-          python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
-      - name: Check docker clickhouse/clickhouse-keeper building
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          export CHECK_NAME="Docker keeper image"
-          python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
-      - name: Cleanup
-        if: always()
-        run: |
-          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-          sudo rm -fr "$TEMP_PATH"
.github/workflows/tags_stable.yml (vendored, deleted file, -74)
@@ -1,74 +0,0 @@
-name: TagsStableWorkflow
-# - Gets artifacts from S3
-# - Sends it to JFROG Artifactory
-# - Adds them to the release assets
-
-env:
-  # Force the stdout and stderr streams to be unbuffered
-  PYTHONUNBUFFERED: 1
-
-on:  # yamllint disable-line rule:truthy
-  push:
-    tags:
-      - 'v*-prestable'
-      - 'v*-stable'
-      - 'v*-lts'
-  workflow_dispatch:
-    inputs:
-      tag:
-        description: 'Test tag'
-        required: true
-        type: string
-
-
-jobs:
-  UpdateVersions:
-    runs-on: [self-hosted, style-checker]
-    steps:
-      - name: Set test tag
-        if: github.event_name == 'workflow_dispatch'
-        run: |
-          echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
-      - name: Get tag name
-        if: github.event_name != 'workflow_dispatch'
-        run: |
-          echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          ref: master
-          fetch-depth: 0
-          filter: tree:0
-      - name: Update versions, docker version, changelog, security
-        env:
-          GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
-        run: |
-          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
-          ./utils/list-versions/update-docker-version.sh
-          GID=$(id -g "${UID}")
-          # --network=host and CI=1 are required for the S3 access from a container
-          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
-            --volume="${GITHUB_WORKSPACE}:/ClickHouse" clickhouse/style-test \
-            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
-            --gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
-            --output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
-          git add "./docs/changelogs/${GITHUB_TAG}.md"
-          python3 ./utils/security-generator/generate_security.py > SECURITY.md
-          git diff HEAD
-      - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v6
-        with:
-          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
-          token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
-          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
-          commit-message: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
-          branch: auto/${{ env.GITHUB_TAG }}
-          assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
-          delete-branch: true
-          title: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
-          labels: do not test
-          body: |
-            Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
-
-            ### Changelog category (leave one):
-            - Not for changelog (changelog entry is not required)
.gitmodules (vendored, +3)
@@ -372,3 +372,6 @@
 [submodule "contrib/double-conversion"]
 	path = contrib/double-conversion
 	url = https://github.com/ClickHouse/double-conversion.git
+[submodule "contrib/numactl"]
+	path = contrib/numactl
+	url = https://github.com/ClickHouse/numactl.git
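The added entry is the form `git submodule add` writes into .gitmodules; a sketch of the command that would reproduce it (assuming the submodule was added the usual way rather than by hand):

    git submodule add https://github.com/ClickHouse/numactl.git contrib/numactl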
64
CHANGELOG.md
64
CHANGELOG.md
@ -18,10 +18,10 @@
|
|||||||
* Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)).
|
* Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
* Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
|
* Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
|
||||||
#### New Feature
|
#### New Feature
|
||||||
* Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
|
* Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
|
||||||
* Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)).
|
|
||||||
* Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
* Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||||
* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)).
|
* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)).
|
||||||
* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)).
|
* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)).
|
||||||
@ -32,23 +32,23 @@
|
|||||||
* Add a new setting to disable/enable writing page index into parquet files. [#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)).
|
* Add a new setting to disable/enable writing page index into parquet files. [#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)).
|
||||||
* Introduce `logger.console_log_level` server config to control the log level to the console (if enabled). [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)).
|
* Introduce `logger.console_log_level` server config to control the log level to the console (if enabled). [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
* Automatically append a wildcard `*` to the end of a directory path with table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)).
|
* Automatically append a wildcard `*` to the end of a directory path with table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)).
|
||||||
* Add `--memory-usage` option to client in non interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)).
|
* Add `--memory-usage` option to client in non-interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)).
|
||||||
* Make an interactive client for clickhouse-disks, add local disk from the local directory. [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)).
|
* Make an interactive client for clickhouse-disks, add local disk from the local directory. [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)).
|
||||||
* When lightweight delete happens on a table with projection(s), users have choices either throw an exception (by default) or drop the projection [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)).
|
* When lightweight delete happens on a table with projection(s), users have choices either throw an exception (by default) or drop the projection [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)).
|
||||||
|
* Add system tables with main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)).
|
||||||
|
|
||||||
#### Experimental Feature
|
#### Experimental Feature
|
||||||
* Change binary serialization of Variant data type: add `compact` mode to avoid writing the same discriminator multiple times for granules with single variant or with only NULL values. Add MergeTree setting `use_compact_variant_discriminators_serialization` that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)).
|
* Change binary serialization of the `Variant` data type: add `compact` mode to avoid writing the same discriminator multiple times for granules with single variant or with only NULL values. Add MergeTree setting `use_compact_variant_discriminators_serialization` that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
* Support rocksdb as backend storage of keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)).
|
* Support on-disk backend storage for clickhouse-keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
* Refactor JSONExtract functions, support more types including experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)).
|
* Refactor JSONExtract functions, support more types including experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
* Support null map subcolumn for Variant and Dynamic subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)).
|
* Support null map subcolumn for `Variant` and `Dynamic` subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
* Fix reading dynamic subcolumns from altered Memory table. Previously if `max_types` parameter of a Dynamic type was changed in Memory table via alter, further subcolumns reading can return wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)).
|
* Fix reading `Dynamic` subcolumns from altered `Memory` table. Previously if `max_types` parameter of a Dynamic type was changed in Memory table via alter, further subcolumns reading can return wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
* Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)).
|
* Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
|
||||||
#### Performance Improvement
|
#### Performance Improvement
|
||||||
* Enable `optimize_functions_to_subcolumns` by default. [#58661](https://github.com/ClickHouse/ClickHouse/pull/58661) ([Anton Popov](https://github.com/CurtizJ)).
|
|
||||||
* Replace int to string algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)).
|
* Replace int to string algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
* Sizes of hash tables created by join (`parallel_hash` algorithm) is collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)).
|
* Sizes of hash tables created by join (`parallel_hash` algorithm) are collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
* Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using of buffering. It is controlled by setting `read_in_order_use_buffering` (enabled by default) and can increase memory usage of query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)).
|
* Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using buffering. It is controlled by setting `read_in_order_use_buffering` (enabled by default) and can increase memory usage of query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
* Improve performance of loading `plain_rewritable` metadata. [#65634](https://github.com/ClickHouse/ClickHouse/pull/65634) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Improve performance of loading `plain_rewritable` metadata. [#65634](https://github.com/ClickHouse/ClickHouse/pull/65634) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Attaching tables on read-only disks will use fewer resources by not loading outdated parts. [#65635](https://github.com/ClickHouse/ClickHouse/pull/65635) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Attaching tables on read-only disks will use fewer resources by not loading outdated parts. [#65635](https://github.com/ClickHouse/ClickHouse/pull/65635) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Support minmax hyperrectangle for Set indices. [#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)).
|
* Support minmax hyperrectangle for Set indices. [#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)).
|
||||||
@@ -60,11 +60,10 @@
 * DatabaseCatalog drops tables faster by using up to database_catalog_drop_table_concurrency threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)).

 #### Improvement
+* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)).
-* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
-* Allow matching column names in a case insensitive manner when reading json files (`input_format_json_ignore_key_case`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)).
 * Support reading partitioned DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)).
 * In composable protocols, the TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)).
 * Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)).
@@ -72,7 +71,6 @@
 * Support aliases in parametrized view function (only new analyzer). [#65190](https://github.com/ClickHouse/ClickHouse/pull/65190) ([Kseniia Sumarokova](https://github.com/kssenii)).
 * Updated to mask account key in logs in azureBlobStorage. [#65273](https://github.com/ClickHouse/ClickHouse/pull/65273) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
 * Partition pruning for `IN` predicates when filter expression is a part of `PARTITION BY` expression. [#65335](https://github.com/ClickHouse/ClickHouse/pull/65335) ([Eduard Karacharov](https://github.com/korowa)).
-* Add system tables with main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)).
 * `arrayMin`/`arrayMax` are now applicable to all data types that are comparable. [#65455](https://github.com/ClickHouse/ClickHouse/pull/65455) ([pn](https://github.com/chloro-pn)).
 * Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)).
 * Do not create format settings for each row when serializing chunks to insert to EmbeddedRocksDB table. [#65474](https://github.com/ClickHouse/ClickHouse/pull/65474) ([Duc Canh Le](https://github.com/canhld94)).
@@ -81,36 +79,35 @@
 * Disable filesystem cache background download by default. It will be enabled back when we fix the issue with possible "Memory limit exceeded" because memory deallocation is done outside of query context (while buffer is allocated inside of query context) if we use background download threads. Plus we need to add a separate setting to define max size to download for background workers (currently it is limited by max_file_segment_size, which might be too big). [#65534](https://github.com/ClickHouse/ClickHouse/pull/65534) ([Kseniia Sumarokova](https://github.com/kssenii)).
 * Add new config option `<config_reload_interval_ms>`, which allows specifying how often ClickHouse reloads its config. [#65545](https://github.com/ClickHouse/ClickHouse/pull/65545) ([alesapin](https://github.com/alesapin)).
 * Implement binary encoding for ClickHouse data types and add its specification in docs. Use it in Dynamic binary serialization, allow to use it in RowBinaryWithNamesAndTypes and Native formats under settings. [#65546](https://github.com/ClickHouse/ClickHouse/pull/65546) ([Kruglov Pavel](https://github.com/Avogar)).
-* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * Server settings `compiled_expression_cache_size` and `compiled_expression_cache_elements_size` are now shown in `system.server_settings`. [#65584](https://github.com/ClickHouse/ClickHouse/pull/65584) ([Robert Schulze](https://github.com/rschu1ze)).
 * Add support for user identification based on x509 SubjectAltName extension. [#65626](https://github.com/ClickHouse/ClickHouse/pull/65626) ([Anton Kozlov](https://github.com/tonickkozlov)).
 * `clickhouse-local` will respect the `max_server_memory_usage` and `max_server_memory_usage_to_ram_ratio` from the configuration file. It will also set the max memory usage to 90% of the system memory by default, like `clickhouse-server` does. [#65697](https://github.com/ClickHouse/ClickHouse/pull/65697) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Add a script to backup your files to ClickHouse. [#65699](https://github.com/ClickHouse/ClickHouse/pull/65699) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* PostgreSQL source support cancel. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)).
+* The PostgreSQL source now supports query cancellation. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)).
-* Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Make `allow_experimental_analyzer` be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
 * Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
-* Allow to use `concat` function with empty arguments ``` sql :) select concat();. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([李扬](https://github.com/taiyang-li)).
+* Allow to use the `concat` function with empty arguments: `:) select concat();`. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([李扬](https://github.com/taiyang-li)).
-* Allow controlling named collections in clickhouse-local. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Allow controlling named collections in `clickhouse-local`. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Improve Azure profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)).
+* Improve Azure-related profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)).
-* Support ORC file read by writer time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)).
+* Support reading ORC files by the writer's time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)).
-* Add settings to control connection to the PostgreSQL. * Setting `postgresql_connection_attempt_timeout` specifies the value passed to `connect_timeout` parameter of connection URL. * Setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL end-point. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)).
+* Add settings to control connections to PostgreSQL. The setting `postgresql_connection_attempt_timeout` specifies the value passed to the `connect_timeout` parameter of the connection URL. The setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL end-point. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)).
-* Reduce inaccuracy of input_wait_elapsed_us/input_wait_elapsed_us/elapsed_us. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)).
+* Reduce inaccuracy of `input_wait_elapsed_us`/`elapsed_us` in the `system.processors_profile_log`. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)).
-* Improve FilesystemCache ProfileEvents. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)).
+* Improve ProfileEvents for the filesystem cache. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)).
-* Add settings to ignore ON CLUSTER clause in queries for named collection management with replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Add settings to ignore the `ON CLUSTER` clause in queries for named collection management with the replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
 * Function `generateSnowflakeID` now allows to specify a machine ID as a parameter to prevent collisions in large clusters. [#66374](https://github.com/ClickHouse/ClickHouse/pull/66374) ([ZAWA_ll](https://github.com/Zawa-ll)).
-* Disable suspending on Ctrl+Z in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Disable suspending on `Ctrl+Z` in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add option for validating the Primary key type in Dictionaries. Without this option for simple layouts any column type will be implicitly converted to UInt64. ### Documentation entry for user-facing changes. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Add option for validating the primary key type in Dictionaries. Without this option, for simple layouts, any column type will be implicitly converted to UInt64. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).

 #### Bug Fix (user-visible misbehavior in an official stable release)
-* Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
 * Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously such cyclic dependencies could lead to a deadlock during server startup. Also fix some bugs in dependencies creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix unexpected sizes of `LowCardinality` columns in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
 * Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
-* Fix the VALID UNTIL clause in the user definition resetting after a restart. [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix the `VALID UNTIL` clause in the user definition resetting after a restart. [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
-* Fix SHOW MERGES remaining time. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix the remaining time column in `SHOW MERGES`. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * `Query was cancelled` might have been printed twice in clickhouse-client. This behaviour is fixed. [#66005](https://github.com/ClickHouse/ClickHouse/pull/66005) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Fixed crash while using MaterializedMySQL with TABLE OVERRIDE that maps MySQL NULL field into ClickHouse not NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)).
+* Fixed crash while using `MaterializedMySQL` (which is an unsupported, experimental feature) with TABLE OVERRIDE that maps MySQL NULL field into ClickHouse not NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)).
-* Fix logical error when PREWHERE expression read no columns and table has no adaptive index granularity (very old table). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)).
+* Fix logical error when `PREWHERE` expression read no columns and table has no adaptive index granularity (very old table). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)).
-* Fix bug with cancellation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)).
+* Fix bug with the cancellation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)).
 * Fix filling parts columns from metadata (when columns.txt does not exist). [#64757](https://github.com/ClickHouse/ClickHouse/pull/64757) ([Azat Khuzhin](https://github.com/azat)).
 * Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
 * Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
@@ -157,7 +154,7 @@
 * Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
 * Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
 * Fix error reporting while copying to S3 or AzureBlobStorage. [#66295](https://github.com/ClickHouse/ClickHouse/pull/66295) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Prevent watchdog from keeping descriptors of unlinked(rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Prevent watchdog from keeping descriptors of unlinked (rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)).
 * Fix the bug that `LogicalExpressionOptimizerPass` lost the logical type of a constant. [#66344](https://github.com/ClickHouse/ClickHouse/pull/66344) ([pn](https://github.com/chloro-pn)).
 * Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
 * Fix possible incorrect result for queries joining and filtering a table with an external engine (like PostgreSQL), due to too aggressive filter pushdown. From now on, conditions from the WHERE section won't be sent to the external database in case of an outer join with an external table. [#66402](https://github.com/ClickHouse/ClickHouse/pull/66402) ([vdimir](https://github.com/vdimir)).
@@ -179,9 +176,6 @@
 * Fix `indexHint` function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
 * Fix AST formatting of 'create table b empty as a'. [#64951](https://github.com/ClickHouse/ClickHouse/pull/64951) ([Michael Kolupaev](https://github.com/al13n321)).

-#### Build/Testing/Packaging Improvement
-* Instantiate template methods ahead in different .cpp files, avoid too large translation units during compiling. [#64818](https://github.com/ClickHouse/ClickHouse/pull/64818) ([lgbo](https://github.com/lgbo-ustc)).
-
 ### <a id="246"></a> ClickHouse release 24.6, 2024-07-01

 #### Backward Incompatible Change
@@ -34,17 +34,13 @@ curl https://clickhouse.com/ | sh

 Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.

-* [v24.7 Community Call](https://clickhouse.com/company/events/v24-7-community-release-call) - Jul 30
+* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 29

 ## Upcoming Events

 Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.

-* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
-* [ClickHouse Cloud - Live Update Call](https://clickhouse.com/company/events/202407-cloud-update-live) - Jul 9
-* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9
-* [AWS Summit in New York](https://clickhouse.com/company/events/2024-07-awssummit-nyc) - Jul 10
-* [ClickHouse Meetup @ Klaviyo - Boston](https://www.meetup.com/clickhouse-boston-user-group/events/300907870) - Jul 11
+* MORE COMING SOON!

 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
@@ -14,9 +14,10 @@ The following versions of ClickHouse server are currently supported with security updates:

 | Version | Supported |
 |:-|:-|
+| 24.7 | ✔️ |
 | 24.6 | ✔️ |
 | 24.5 | ✔️ |
-| 24.4 | ✔️ |
+| 24.4 | ❌ |
 | 24.3 | ✔️ |
 | 24.2 | ❌ |
 | 24.1 | ❌ |
@@ -32,6 +32,7 @@ set (SRCS
     StringRef.cpp
     safeExit.cpp
     throwError.cpp
+    Numa.cpp
 )

 add_library (common ${SRCS})

@@ -46,6 +47,10 @@ if (TARGET ch_contrib::crc32_s390x)
     target_link_libraries(common PUBLIC ch_contrib::crc32_s390x)
 endif()

+if (TARGET ch_contrib::numactl)
+    target_link_libraries(common PUBLIC ch_contrib::numactl)
+endif()
+
 target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")

 target_link_libraries (common
base/base/Numa.cpp (new file, 37 lines)
@@ -0,0 +1,37 @@
+#include <base/Numa.h>
+
+#include "config.h"
+
+#if USE_NUMACTL
+#    include <numa.h>
+#endif
+
+namespace DB
+{
+
+std::optional<size_t> getNumaNodesTotalMemory()
+{
+    std::optional<size_t> total_memory;
+#if USE_NUMACTL
+    if (numa_available() != -1)
+    {
+        auto * membind = numa_get_membind();
+        if (!numa_bitmask_equal(membind, numa_all_nodes_ptr))
+        {
+            total_memory.emplace(0);
+            auto max_node = numa_max_node();
+            for (int i = 0; i <= max_node; ++i)
+            {
+                if (numa_bitmask_isbitset(membind, i))
+                    *total_memory += numa_node_size(i, nullptr);
+            }
+        }
+
+        numa_bitmask_free(membind);
+    }
+
+#endif
+    return total_memory;
+}
+
+}
base/base/Numa.h (new file, 12 lines)
@@ -0,0 +1,12 @@
+#pragma once
+
+#include <optional>
+
+namespace DB
+{
+
+/// return total memory of NUMA nodes the process is bound to
+/// if NUMA is not supported or process can use all nodes, std::nullopt is returned
+std::optional<size_t> getNumaNodesTotalMemory();
+
+}
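For illustration, a minimal sketch (not part of the commit) of how the new helper is meant to be consumed. It assumes compilation inside the ClickHouse tree, where `base/Numa.h` is on the include path and the `common` library links `ch_contrib::numactl`.

```cpp
#include <base/Numa.h>

#include <cstdio>

int main()
{
    // std::nullopt means NUMA is unavailable or the process may use all nodes;
    // a value means the process is membind-restricted to a subset of nodes.
    if (auto numa_memory = DB::getNumaNodesTotalMemory(); numa_memory.has_value())
        std::printf("memory of the bound NUMA nodes: %zu bytes\n", *numa_memory);
    else
        std::printf("no NUMA memory binding; fall back to total RAM\n");
}
```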
@@ -2,15 +2,14 @@

 #include <base/cgroupsv2.h>
 #include <base/getPageSize.h>
+#include <base/Numa.h>

 #include <fstream>
-#include <stdexcept>

 #include <unistd.h>
 #include <sys/types.h>
 #include <sys/param.h>


 namespace
 {

@@ -63,6 +62,9 @@ uint64_t getMemoryAmountOrZero()

     uint64_t memory_amount = num_pages * page_size;

+    if (auto total_numa_memory = DB::getNumaNodesTotalMemory(); total_numa_memory.has_value())
+        memory_amount = *total_numa_memory;
+
     /// Respect the memory limit set by cgroups v2.
     auto limit_v2 = getCgroupsV2MemoryLimit();
     if (limit_v2.has_value() && *limit_v2 < memory_amount)
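The ordering in the hunk above matters: the NUMA binding replaces the sysconf-based estimate first, and the cgroup v2 limit can then only lower it. A self-contained sketch of that logic (not part of the commit; `getCgroupsV2MemoryLimitStub` is a hypothetical stand-in for the file-local cgroup helper):

```cpp
#include <algorithm>
#include <cstdint>
#include <optional>
#include <unistd.h>

#include <base/Numa.h>

// Hypothetical stand-in for the file-local helper in base/getMemoryAmount.cpp;
// returning nullopt here means "no cgroup v2 memory limit configured".
static std::optional<uint64_t> getCgroupsV2MemoryLimitStub()
{
    return std::nullopt;
}

uint64_t memoryAmountSketch()
{
    // Start from the classic sysconf-based estimate of physical RAM.
    uint64_t memory_amount = uint64_t(sysconf(_SC_PHYS_PAGES)) * uint64_t(sysconf(_SC_PAGESIZE));

    // A NUMA membind narrows the usable memory first...
    if (auto total_numa_memory = DB::getNumaNodesTotalMemory(); total_numa_memory.has_value())
        memory_amount = *total_numa_memory;

    // ...and a cgroup v2 limit can only lower it further.
    if (auto limit_v2 = getCgroupsV2MemoryLimitStub(); limit_v2.has_value())
        memory_amount = std::min(memory_amount, *limit_v2);

    return memory_amount;
}
```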
@@ -18,6 +18,16 @@ if (GLIBC_COMPATIBILITY)
     message (FATAL_ERROR "glibc_compatibility can only be used on x86_64 or aarch64.")
 endif ()

+if (SANITIZE STREQUAL thread)
+    # Disable TSAN instrumentation that conflicts with re-exec due to high ASLR entropy using getauxval
+    # See longer comment in __auxv_init_procfs
+    # In the case of tsan we need to make sure getauxval is not instrumented as that would introduce tsan
+    # internal calls to functions that depend on a state that isn't initialized yet
+    set_source_files_properties(
+        musl/getauxval.c
+        PROPERTIES COMPILE_FLAGS "-mllvm -tsan-instrument-func-entry-exit=false")
+endif()
+
 # Need to omit frame pointers to match the performance of glibc
 set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")
@@ -75,6 +75,44 @@ unsigned long NO_SANITIZE_THREAD __getauxval_procfs(unsigned long type)
 }
 static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type)
 {
+#if defined(__x86_64__) && defined(__has_feature)
+#    if __has_feature(memory_sanitizer) || __has_feature(thread_sanitizer)
+    /// Sanitizers are not compatible with high ASLR entropy, which is the default on modern Linux distributions, and
+    /// to workaround this limitation, TSAN and MSAN (couldn't see other sanitizers doing the same), re-exec the binary
+    /// without ASLR (see https://github.com/llvm/llvm-project/commit/0784b1eefa36d4acbb0dacd2d18796e26313b6c5)
+
+    /// The problem we face is that, in order to re-exec, the sanitizer wants to use the original pathname in the call
+    /// and to get its value it uses getauxval (https://github.com/llvm/llvm-project/blob/20eff684203287828d6722fc860b9d3621429542/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp#L985-L988).
+    /// Since we provide getauxval ourselves (to minimize the version dependency on runtime glibc), we are the ones
+    /// being called and we fail horribly:
+    ///
+    /// ==301455==ERROR: MemorySanitizer: SEGV on unknown address 0x2ffc6d721550 (pc 0x5622c1cc0073 bp 0x000000000003 sp 0x7ffc6d721530 T301455)
+    /// ==301455==The signal is caused by a WRITE memory access.
+    ///     #0 0x5622c1cc0073 in __auxv_init_procfs ./ClickHouse/base/glibc-compatibility/musl/getauxval.c:129:5
+    ///     #1 0x5622c1cbffe9 in getauxval ./ClickHouse/base/glibc-compatibility/musl/getauxval.c:240:12
+    ///     #2 0x5622c0d7bfb4 in __sanitizer::ReExec() crtstuff.c
+    ///     #3 0x5622c0df7bfc in __msan::InitShadowWithReExec(bool) crtstuff.c
+    ///     #4 0x5622c0d95356 in __msan_init (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x256356) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741)
+    ///     #5 0x5622c0dfe878 in msan.module_ctor main.cc
+    ///     #6 0x5622c1cc156c in __libc_csu_init (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x118256c) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741)
+    ///     #7 0x73dc05dd7ea3 in __libc_start_main /usr/src/debug/glibc/glibc/csu/../csu/libc-start.c:343:6
+    ///     #8 0x5622c0d6b7cd in _start (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x22c7cd) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741)
+
+    /// The source of the issue above is that, at this point in time during __msan_init, we can't really do much as
+    /// most global variables aren't initialized or available yet, so we can't initiate the auxiliary vector.
+    /// Normal glibc / musl getauxval doesn't have this problem since they initiate their auxval vector at the very
+    /// start of __libc_start_main (just keeping track of argv+argc+1), but we don't have such option (otherwise
+    /// this complexity of reading "/proc/self/auxv" or using __environ would not be necessary).
+
+    /// To avoid these crashes on the re-exec call (see above how it would fail when creating `aux`, and if we used
+    /// __auxv_init_environ then it would SIGSEGV on READing `__environ`) we capture this call for `AT_EXECFN` and
+    /// unconditionally return "/proc/self/exe" without any preparation. Theoretically this should be fine in
+    /// our case, as we don't load any libraries. That's the theory at least.
+    if (type == AT_EXECFN)
+        return (unsigned long)"/proc/self/exe";
+#    endif
+#endif

     // For debugging:
     // - od -t dL /proc/self/auxv
     // - LD_SHOW_AUX= ls

@@ -199,7 +237,7 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_environ(unsigned long type)
 // - __auxv_init_procfs -> __auxv_init_environ -> __getauxval_environ
 static void * volatile getauxval_func = (void *)__auxv_init_procfs;

-unsigned long getauxval(unsigned long type)
+unsigned long NO_SANITIZE_THREAD getauxval(unsigned long type)
 {
     return ((unsigned long (*)(unsigned long))getauxval_func)(type);
 }
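A minimal, self-contained probe of the behavior described above (not part of the commit). On an ordinary glibc system this goes through the system `getauxval`; in ClickHouse's MSAN/TSAN builds the bundled implementation answers the `AT_EXECFN` lookup with "/proc/self/exe" as the comment explains.

```cpp
#include <cstdio>
#include <sys/auxv.h> // getauxval(), AT_EXECFN

int main()
{
    // AT_EXECFN yields the pathname that was used to execute the program.
    const char * execfn = reinterpret_cast<const char *>(getauxval(AT_EXECFN));
    std::printf("AT_EXECFN = %s\n", execfn ? execfn : "(not provided)");
}
```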
@@ -261,6 +261,11 @@ namespace Util
         ///
         /// Throws a NullPointerException if no Application instance exists.
+
+    static Application * instanceRawPtr();
+        /// Returns a raw pointer to the Application singleton.
+        ///
+        /// The caller should check whether the result is nullptr.

     const Poco::Timestamp & startTime() const;
         /// Returns the application start time (UTC).

@@ -448,6 +453,12 @@ namespace Util
 }


+inline Application * Application::instanceRawPtr()
+{
+    return _pInstance;
+}
+
+
 inline const Poco::Timestamp & Application::startTime() const
 {
     return _startTime;
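A sketch of the intended use (not part of the commit): unlike `Application::instance()`, which throws `NullPointerException` when no Application exists yet, the new `instanceRawPtr()` lets callers probe for the singleton safely.

```cpp
#include <Poco/Util/Application.h>

#include <iostream>
#include <string>

void logViaApplicationIfAny(const std::string & message)
{
    // nullptr is a valid result here, e.g. during early startup
    // before any Poco::Util::Application has been constructed.
    if (auto * app = Poco::Util::Application::instanceRawPtr())
        app->logger().information(message);
    else
        std::cerr << message << '\n';
}
```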
@@ -57,7 +57,8 @@ option(WITH_COVERAGE "Instrumentation for code coverage with default implementation" OFF)

 if (WITH_COVERAGE)
     message (STATUS "Enabled instrumentation for code coverage")
-    set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping")
+    set(COVERAGE_FLAGS "SHELL:-fprofile-instr-generate -fcoverage-mapping")
+    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
 endif()

 option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)
contrib/CMakeLists.txt (vendored)
@@ -230,6 +230,8 @@ add_contrib (libssh-cmake libssh)

 add_contrib (prometheus-protobufs-cmake prometheus-protobufs prometheus-protobufs-gogo)

+add_contrib(numactl-cmake numactl)
+
 # Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
 # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
 # in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually,
contrib/NuRaft (vendored)
@@ -1 +1 @@
-Subproject commit cb5dc3c906e80f253e9ce9535807caef827cc2e0
+Subproject commit c2b0811f164a7948208489562dab4f186eb305ce

contrib/azure (vendored)
@@ -1 +1 @@
-Subproject commit ea3e19a7be08519134c643177d56c7484dfec884
+Subproject commit 67272b7ee0adff6b69921b26eb071ba1a353062c

contrib/icu (vendored)
@@ -1 +1 @@
-Subproject commit a56dde820dc35665a66f2e9ee8ba58e75049b668
+Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625
@@ -12,8 +12,6 @@ endif()
 set(ICU_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source")
 set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/")

-set (CMAKE_CXX_STANDARD 17)
-
 # These lists of sources were generated from build log of the original ICU build system (configure + make).

 set(ICUUC_SOURCES

@@ -462,9 +460,9 @@ file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
 enable_language(ASM)

 if (ARCH_S390X)
-    set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70b_dat.S" )
+    set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75b_dat.S" )
 else()
-    set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70l_dat.S" )
+    set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75l_dat.S" )
 endif()

 set(ICUDATA_SOURCES
contrib/icudata (vendored)
@@ -1 +1 @@
-Subproject commit c8e717892a557b4d2852317c7d628aacc0a0e5ab
+Subproject commit 4904951339a70b4814d2d3723436b20d079cb01b

contrib/libprotobuf-mutator (vendored)
@@ -1 +1 @@
-Subproject commit a304ec48dcf15d942607032151f7e9ee504b5dcf
+Subproject commit 1f95f8083066f5b38fd2db172e7e7f9aa7c49d2d

contrib/numactl (new vendored submodule)
@@ -0,0 +1 @@
+Subproject commit 8d13d63a05f0c3cd88bf777cbb61541202b7da08
contrib/numactl-cmake/CMakeLists.txt (new file, 30 lines)
@@ -0,0 +1,30 @@
+if (NOT (
+    OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_LOONGARCH64))
+)
+    if (ENABLE_NUMACTL)
+        message (${RECONFIGURE_MESSAGE_LEVEL}
+            "numactl is disabled implicitly because the OS or architecture is not supported. Use -DENABLE_NUMACTL=0")
+    endif ()
+    set (ENABLE_NUMACTL OFF)
+else()
+    option (ENABLE_NUMACTL "Enable numactl" ${ENABLE_LIBRARIES})
+endif()
+
+if (NOT ENABLE_NUMACTL)
+    message (STATUS "Not using numactl")
+    return()
+endif ()
+
+set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/numactl")
+
+set (SRCS
+    "${LIBRARY_DIR}/libnuma.c"
+    "${LIBRARY_DIR}/syscall.c"
+)
+
+add_library(_numactl ${SRCS})
+
+target_include_directories(_numactl SYSTEM PRIVATE include)
+target_include_directories(_numactl SYSTEM PUBLIC "${LIBRARY_DIR}")
+
+add_library(ch_contrib::numactl ALIAS _numactl)
contrib/numactl-cmake/include/config.h (new file, 82 lines)
@@ -0,0 +1,82 @@
+/* config.h. Generated from config.h.in by configure. */
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+/* Checking for symver attribute */
+#define HAVE_ATTRIBUTE_SYMVER 0
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdio.h> header file. */
+#define HAVE_STDIO_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to the sub-directory where libtool stores uninstalled libraries. */
+#define LT_OBJDIR ".libs/"
+
+/* Name of package */
+#define PACKAGE "numactl"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "numactl"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "numactl 2.1"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "numactl"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "2.1"
+
+/* Define to 1 if all of the C89 standard headers exist (not just the ones
+   required in a freestanding environment). This macro is provided for
+   backward compatibility; new code need not use it. */
+#define STDC_HEADERS 1
+
+/* If the compiler supports a TLS storage class define it to that here */
+#define TLS __thread
+
+/* Version number of package */
+#define VERSION "2.1"
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define to 1 on platforms where this makes off_t a 64-bit type. */
+/* #undef _LARGE_FILES */
+
+/* Number of bits in time_t, on hosts where this is settable. */
+/* #undef _TIME_BITS */
+
+/* Define to 1 on platforms where this makes time_t a 64-bit type. */
+/* #undef __MINGW_USE_VC2005_COMPAT */
contrib/rocksdb (vendored)
@@ -1 +1 @@
-Subproject commit be366233921293bd07a84dc4ea6991858665f202
+Subproject commit 01e43568fa9f3f7bf107b2b66c00b286b456f33e
@@ -5,6 +5,9 @@ if (NOT ENABLE_ROCKSDB)
     return()
 endif()

+# not in original build system, otherwise xxHash.cc fails to compile with ClickHouse C++23 default
+set (CMAKE_CXX_STANDARD 20)
+
 # Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
 option(WITH_JEMALLOC "build with JeMalloc" OFF)

@@ -16,14 +19,6 @@ option(WITH_LZ4 "build with lz4" ON)
 option(WITH_ZLIB "build with zlib" ON)
 option(WITH_ZSTD "build with zstd" ON)

-# third-party/folly is only validated to work on Linux and Windows for now.
-# So only turn it on there by default.
-if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
-    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
-else()
-    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
-endif()
-
 if(WITH_SNAPPY)
     add_definitions(-DSNAPPY)
     list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)

@@ -44,7 +39,7 @@ if(WITH_ZSTD)
     list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
 endif()

-option(PORTABLE "build a portable binary" ON)
+add_definitions(-DROCKSDB_PORTABLE)

 if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
     add_definitions(-DHAVE_SSE42)

@@ -59,11 +54,6 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
     # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
 endif()

-set (HAVE_THREAD_LOCAL 1)
-if(HAVE_THREAD_LOCAL)
-    add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
-endif()
-
 if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
     add_definitions(-DOS_MACOSX)
 elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")

@@ -89,19 +79,21 @@ set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")

 include_directories(${ROCKSDB_SOURCE_DIR})
 include_directories("${ROCKSDB_SOURCE_DIR}/include")
-if(WITH_FOLLY_DISTRIBUTED_MUTEX)
-    include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
-endif()

 set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/cache/cache.cc
     ${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
     ${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc
+    ${ROCKSDB_SOURCE_DIR}/cache/cache_helpers.cc
     ${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc
+    ${ROCKSDB_SOURCE_DIR}/cache/charged_cache.cc
     ${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
+    ${ROCKSDB_SOURCE_DIR}/cache/compressed_secondary_cache.cc
     ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
+    ${ROCKSDB_SOURCE_DIR}/cache/secondary_cache.cc
     ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
     ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
+    ${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc

@@ -113,6 +105,7 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
+    ${ROCKSDB_SOURCE_DIR}/db/blob/blob_source.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
     ${ROCKSDB_SOURCE_DIR}/db/builder.cc
     ${ROCKSDB_SOURCE_DIR}/db/c.cc

@@ -124,7 +117,11 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc
     ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc
     ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc
+    ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_service_job.cc
+    ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_state.cc
+    ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_outputs.cc
     ${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc
+    ${ROCKSDB_SOURCE_DIR}/db/compaction/subcompaction_state.cc
     ${ROCKSDB_SOURCE_DIR}/db/convenience.cc
     ${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc
     ${ROCKSDB_SOURCE_DIR}/db/db_impl/compacted_db_impl.cc

@@ -159,10 +156,11 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc
     ${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc
     ${ROCKSDB_SOURCE_DIR}/db/output_validator.cc
-    ${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc
+    ${ROCKSDB_SOURCE_DIR}/db/periodic_task_scheduler.cc
     ${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc
     ${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc
     ${ROCKSDB_SOURCE_DIR}/db/repair.cc
+    ${ROCKSDB_SOURCE_DIR}/db/seqno_to_time_mapping.cc
     ${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc
     ${ROCKSDB_SOURCE_DIR}/db/table_cache.cc
     ${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc

@@ -174,6 +172,8 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/db/version_set.cc
     ${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc
     ${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc
+    ${ROCKSDB_SOURCE_DIR}/db/wide/wide_column_serialization.cc
+    ${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns.cc
     ${ROCKSDB_SOURCE_DIR}/db/write_batch.cc
     ${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc
     ${ROCKSDB_SOURCE_DIR}/db/write_controller.cc

@@ -182,7 +182,6 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/env/env.cc
     ${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc
     ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
-    ${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc
     ${ROCKSDB_SOURCE_DIR}/env/file_system.cc
     ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
     ${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc

@@ -233,16 +232,17 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/options/options.cc
     ${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
     ${ROCKSDB_SOURCE_DIR}/options/options_parser.cc
+    ${ROCKSDB_SOURCE_DIR}/port/mmap.cc
     ${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc
     ${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc
     ${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc
     ${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc
-    ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc
     ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc
     ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc
     ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc
     ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc
     ${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc
+    ${ROCKSDB_SOURCE_DIR}/table/block_based/block_cache.cc
     ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc
     ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc
     ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc

@@ -300,9 +300,12 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc
     ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc
     ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
+    ${ROCKSDB_SOURCE_DIR}/util/async_file_reader.cc
+    ${ROCKSDB_SOURCE_DIR}/util/cleanable.cc
     ${ROCKSDB_SOURCE_DIR}/util/coding.cc
|
${ROCKSDB_SOURCE_DIR}/util/coding.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
|
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
|
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/util/compression.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
|
${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
|
${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
|
${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
|
||||||
@ -311,16 +314,17 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
|
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/random.cc
|
${ROCKSDB_SOURCE_DIR}/util/random.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
|
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/regex.cc
|
|
||||||
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
|
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/slice.cc
|
${ROCKSDB_SOURCE_DIR}/util/slice.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
|
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/status.cc
|
${ROCKSDB_SOURCE_DIR}/util/status.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/util/stderr_logger.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/string_util.cc
|
${ROCKSDB_SOURCE_DIR}/util/string_util.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
|
${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
|
${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
|
${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/agg_merge/agg_merge.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/backup/backup_engine.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc
|
||||||
@ -335,6 +339,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/counted_fs.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
|
||||||
@ -422,15 +427,6 @@ list(APPEND SOURCES
|
|||||||
"${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc"
|
"${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc"
|
||||||
"${ROCKSDB_SOURCE_DIR}/env/io_posix.cc")
|
"${ROCKSDB_SOURCE_DIR}/env/io_posix.cc")
|
||||||
|
|
||||||
if(WITH_FOLLY_DISTRIBUTED_MUTEX)
|
|
||||||
list(APPEND SOURCES
|
|
||||||
"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/detail/Futex.cpp"
|
|
||||||
"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/AtomicNotification.cpp"
|
|
||||||
"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/DistributedMutex.cpp"
|
|
||||||
"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/ParkingLot.cpp"
|
|
||||||
"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
add_library(_rocksdb ${SOURCES})
|
add_library(_rocksdb ${SOURCES})
|
||||||
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
|
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
|
||||||
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
|
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
|
||||||
|
@ -1,16 +1,33 @@
 // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-/// This file was edited for ClickHouse.
 
 #include <memory>
 
 #include "rocksdb/version.h"
+#include "rocksdb/utilities/object_registry.h"
 #include "util/string_util.h"
 
 // The build script may replace these values with real values based
 // on whether or not GIT is available and the platform settings
-static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:0";
-static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:master";
-static const std::string rocksdb_build_date = "rocksdb_build_date:2000-01-01";
+static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:72438a678872544809393b831c7273794c074215";
+static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:main";
+#define HAS_GIT_CHANGES 0
+#if HAS_GIT_CHANGES == 0
+// If HAS_GIT_CHANGES is 0, the GIT date is used.
+// Use the time the branch/tag was last modified
+static const std::string rocksdb_build_date = "rocksdb_build_date:2024-07-12 16:01:57";
+#else
+// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications.
+// Use the time the build was created.
+static const std::string rocksdb_build_date = "rocksdb_build_date:2024-07-13 17:15:50";
+#endif
+
+extern "C" {
+
+} // extern "C"
+
+std::unordered_map<std::string, ROCKSDB_NAMESPACE::RegistrarFunc> ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = {
+
+};
 
 namespace ROCKSDB_NAMESPACE {
 static void AddProperty(std::unordered_map<std::string, std::string> *props, const std::string& name) {
@ -39,12 +56,12 @@ const std::unordered_map<std::string, std::string>& GetRocksBuildProperties() {
 }
 
 std::string GetRocksVersionAsString(bool with_patch) {
-  std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR);
+  std::string version = std::to_string(ROCKSDB_MAJOR) + "." + std::to_string(ROCKSDB_MINOR);
   if (with_patch) {
-    return version + "." + ToString(ROCKSDB_PATCH);
+    return version + "." + std::to_string(ROCKSDB_PATCH);
   } else {
     return version;
   }
 }
 
 std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) {
@ -26,7 +26,6 @@ sed -i '/onBrokenMarkdownLinks:/ s/ignore/error/g' docusaurus.config.js
 
 if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
     export CI=true
-    yarn install
     exec yarn build "$@"
 fi
 
@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.6.2.17"
+ARG VERSION="24.7.2.13"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.6.2.17"
+ARG VERSION="24.7.2.13"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.6.2.17"
+ARG VERSION="24.7.2.13"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 #docker-official-library:off
@ -261,9 +261,12 @@ function timeout_with_logging() {
 
     timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
 
+    echo "Checking if it is a timeout. The code 124 will indicate a timeout."
     if [[ "${exit_code}" -eq "124" ]]
     then
-        echo "The command 'timeout ${*}' has been killed by timeout"
+        echo "The command 'timeout ${*}' has been killed by timeout."
+    else
+        echo "No, it isn't a timeout."
     fi
 
     return $exit_code
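A note on the 124 check introduced above: plain GNU coreutils `timeout` (when `--preserve-status` is not in play) exits with status 124 exactly when the supervised command was killed for exceeding its limit. A minimal, self-contained sketch of the same check; the `sleep 5` under a 2-second limit is purely illustrative:

```bash
#!/usr/bin/env bash
exit_code=0
# sleep 5 cannot finish within 2s, so timeout kills it and reports 124.
timeout 2s sleep 5 || exit_code="${?}"

if [[ "${exit_code}" -eq "124" ]]
then
    echo "killed by timeout"
else
    echo "finished on its own with status ${exit_code}"
fi
```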
@ -13,6 +13,7 @@ entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
 # https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
 # Double-escaped backslashes are a tribute to the engineering wonder of docker --
 # it gives '/bin/sh: 1: [bash,: not found' otherwise.
+numactl --hardware
 node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
 echo Will bind to NUMA node $node;
 numactl --cpunodebind=$node --membind=$node $entry
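For reference, the `node=$(( RANDOM % ... ))` line above extracts the node count from `numactl --hardware` output (whose first line looks like `available: 2 nodes (0-1)`) and picks one node uniformly at random. A stripped-down sketch of the same arithmetic, with the count hardcoded for illustration:

```bash
#!/usr/bin/env bash
# Stand-in for the value parsed out of `numactl --hardware`.
nodes=2
# RANDOM is a bash builtin in 0..32767; the modulo maps it onto 0..nodes-1.
node=$(( RANDOM % nodes ))
echo "Will bind to NUMA node $node"
```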
@ -251,9 +251,12 @@ function timeout_with_logging() {
 
     timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
 
+    echo "Checking if it is a timeout. The code 124 will indicate a timeout."
     if [[ "${exit_code}" -eq "124" ]]
     then
-        echo "The command 'timeout ${*}' has been killed by timeout"
+        echo "The command 'timeout ${*}' has been killed by timeout."
+    else
+        echo "No, it isn't a timeout."
     fi
 
     return $exit_code
@ -3,6 +3,12 @@
 # shellcheck disable=SC1091
 source /setup_export_logs.sh
 
+# shellcheck source=../stateless/stress_tests.lib
+source /stress_tests.lib
+
+# Avoid overlaps with previous runs
+dmesg --clear
+
 # fail on errors, verbose and export all env variables
 set -e -x -a
 
@ -212,6 +218,10 @@ function run_tests()
         ADDITIONAL_OPTIONS+=('--shared-catalog')
     fi
 
+    if [[ "$USE_DISTRIBUTED_CACHE" -eq 1 ]]; then
+        ADDITIONAL_OPTIONS+=('--distributed-cache')
+    fi
+
     if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
         ADDITIONAL_OPTIONS+=('--replicated-database')
         # Too many tests fail for DatabaseReplicated in parallel.
@ -247,12 +257,22 @@ function run_tests()
 
     try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"
 
+    TIMEOUT=$((MAX_RUN_TIME - 800 > 8400 ? 8400 : MAX_RUN_TIME - 800))
+    START_TIME=${SECONDS}
     set +e
-    timeout -k 60m -s TERM --preserve-status 140m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
-        --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
+    timeout --preserve-status --signal TERM --kill-after 60m ${TIMEOUT}s \
+        clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
+            --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
         | ts '%Y-%m-%d %H:%M:%S' \
        | tee -a test_output/test_result.txt
     set -e
+    DURATION=$((START_TIME - SECONDS))
+
+    echo "Elapsed ${DURATION} seconds."
+    if [[ $DURATION -ge $TIMEOUT ]]
+    then
+        echo "It looks like the command is terminated by the timeout, which is ${TIMEOUT} seconds."
+    fi
 }
 
 export -f run_tests
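The `TIMEOUT` assignment above relies on the ternary operator available inside bash arithmetic: it budgets `MAX_RUN_TIME` minus 800 seconds for the test phase, but never more than 8400 seconds. A self-contained sketch of the clamp; the sample `MAX_RUN_TIME` value is illustrative:

```bash
#!/usr/bin/env bash
MAX_RUN_TIME=10000  # illustrative budget, in seconds
# Reserve 800s of the budget for teardown, and cap the test phase at 8400s.
TIMEOUT=$((MAX_RUN_TIME - 800 > 8400 ? 8400 : MAX_RUN_TIME - 800))
echo "test phase limited to ${TIMEOUT}s"  # prints 8400: 9200 clamped to the cap
```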
@ -264,7 +284,7 @@ if [ "$NUM_TRIES" -gt "1" ]; then
     # We don't run tests with Ordinary database in PRs, only in master.
     # So run new/changed tests with Ordinary at least once in flaky check.
     timeout_with_logging "$TIMEOUT" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
-        | sed 's/All tests have finished//' | sed 's/No tests were run//' ||:
+        | sed 's/All tests have finished/Redacted: a message about tests finish is deleted/' | sed 's/No tests were run/Redacted: a message about no tests run is deleted/' ||:
 fi
 
 timeout_with_logging "$TIMEOUT" bash -c run_tests ||:
@ -285,22 +305,22 @@ stop_logs_replication
 failed_to_save_logs=0
 for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
 do
-    err=$(clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.gz' format TSVWithNamesAndTypes")
-    echo "$err"
-    [[ "0" != "${#err}" ]] && failed_to_save_logs=1
+    if ! clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.zst' format TSVWithNamesAndTypes"; then
+        failed_to_save_logs=1
+    fi
     if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
-        err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
-        echo "$err"
-        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
+        if ! clickhouse-client --port 19000 -q "select * from system.$table into outfile '/test_output/$table.1.tsv.zst' format TSVWithNamesAndTypes"; then
+            failed_to_save_logs=1
+        fi
-        err=$( { clickhouse-client --port 29000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst; } 2>&1 )
-        echo "$err"
-        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
+        if ! clickhouse-client --port 29000 -q "select * from system.$table into outfile '/test_output/$table.2.tsv.zst' format TSVWithNamesAndTypes"; then
+            failed_to_save_logs=1
+        fi
     fi
 
     if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
-        err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
-        echo "$err"
-        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
+        if ! clickhouse-client --port 29000 -q "select * from system.$table into outfile '/test_output/$table.2.tsv.zst' format TSVWithNamesAndTypes"; then
+            failed_to_save_logs=1
+        fi
     fi
 done
 
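The loop rewrite above swaps capture-and-inspect (`err=$(...)` plus a `${#err}` length test) for a plain exit-status check, since `clickhouse-client` already reports failure through its exit code. A generic sketch of the pattern, with `false` standing in for the real client invocation:

```bash
#!/usr/bin/env bash
failed_to_save_logs=0
# `if ! cmd` branches on the command's exit status; no output capture is needed.
if ! false; then
    failed_to_save_logs=1
fi
echo "failed_to_save_logs=${failed_to_save_logs}"  # prints 1
```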
@ -406,4 +426,7 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
     tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
 fi
 
+# Grep logs for sanitizer asserts, crashes and other critical errors
+check_logs_for_critical_errors
+
 collect_core_dumps
@ -242,7 +242,7 @@ function check_server_start()
 function check_logs_for_critical_errors()
 {
     # Sanitizer asserts
-    sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr.log >> /test_output/tmp
+    sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr*.log >> /test_output/tmp
     rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
         && echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
         || echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv
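For context on the `sed` expression above: `sed -n '/START/,/END/p'` prints every range of lines from a match of the first pattern through the next match of the second, which is how the sanitizer report block is carved out of `stderr*.log` (the change only widens the glob to cover rotated logs). A minimal sketch of the idiom on inline input:

```bash
#!/usr/bin/env bash
# Prints from the line matching "WARNING" through the next empty line.
printf 'noise\nWARNING: something\ndetail line\n\ntrailing noise\n' \
    | sed -n '/WARNING/,/^$/p'
# Output: "WARNING: something", "detail line", and the terminating blank line.
```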
@ -45,9 +45,12 @@ function timeout_with_logging() {
 
     timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
 
+    echo "Checking if it is a timeout. The code 124 will indicate a timeout."
     if [[ "${exit_code}" -eq "124" ]]
     then
-        echo "The command 'timeout ${*}' has been killed by timeout"
+        echo "The command 'timeout ${*}' has been killed by timeout."
+    else
+        echo "No, it isn't a timeout."
     fi
 
     return $exit_code
@ -12,6 +12,7 @@ UNKNOWN_SIGN = "[ UNKNOWN "
 SKIPPED_SIGN = "[ SKIPPED "
 HUNG_SIGN = "Found hung queries in processlist"
 SERVER_DIED_SIGN = "Server died, terminating all processes"
+SERVER_DIED_SIGN2 = "Server does not respond to health check"
 DATABASE_SIGN = "Database: "
 
 SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]
@ -43,7 +44,7 @@ def process_test_log(log_path, broken_tests):
         if HUNG_SIGN in line:
             hung = True
             break
-        if SERVER_DIED_SIGN in line:
+        if SERVER_DIED_SIGN in line or SERVER_DIED_SIGN2 in line:
             server_died = True
         if RETRIES_SIGN in line:
             retries = True
@ -111,12 +112,12 @@ def process_test_log(log_path, broken_tests):
     # Python does not support TSV, so we have to escape '\t' and '\n' manually
     # and hope that complex escape sequences will not break anything
     test_results = [
-        (
+        [
             test[0],
             test[1],
             test[2],
             "".join(test[3])[:4096].replace("\t", "\\t").replace("\n", "\\n"),
-        )
+        ]
         for test in test_results
     ]
 
@ -170,18 +171,23 @@ def process_result(result_path, broken_tests):
     if hung:
         description = "Some queries hung, "
         state = "failure"
-        test_results.append(("Some queries hung", "FAIL", "0", ""))
+        test_results.append(["Some queries hung", "FAIL", "0", ""])
     elif server_died:
         description = "Server died, "
         state = "failure"
-        test_results.append(("Server died", "FAIL", "0", ""))
+        # When ClickHouse server crashes, some tests are still running
+        # and fail because they cannot connect to server
+        for result in test_results:
+            if result[1] == "FAIL":
+                result[1] = "SERVER_DIED"
+        test_results.append(["Server died", "FAIL", "0", ""])
     elif not success_finish:
         description = "Tests are not finished, "
         state = "failure"
-        test_results.append(("Tests are not finished", "FAIL", "0", ""))
+        test_results.append(["Tests are not finished", "FAIL", "0", ""])
     elif retries:
         description = "Some tests restarted, "
-        test_results.append(("Some tests restarted", "SKIPPED", "0", ""))
+        test_results.append(["Some tests restarted", "SKIPPED", "0", ""])
     else:
         description = ""
 
@ -233,11 +239,12 @@ if __name__ == "__main__":
         # sort by status then by check name
         order = {
             "FAIL": 0,
-            "Timeout": 1,
-            "NOT_FAILED": 2,
-            "BROKEN": 3,
-            "OK": 4,
-            "SKIPPED": 5,
+            "SERVER_DIED": 1,
+            "Timeout": 2,
+            "NOT_FAILED": 3,
+            "BROKEN": 4,
+            "OK": 5,
+            "SKIPPED": 6,
         }
         return order.get(item[1], 10), str(item[0]), item[1]
 
35
docs/changelogs/v23.8.16.40-lts.md
Normal file
@ -0,0 +1,35 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v23.8.16.40-lts (e143a9039ba) FIXME as compared to v23.8.15.35-lts (060ff8e813a)
+
+#### Improvement
+* Backported in [#66962](https://github.com/ClickHouse/ClickHouse/issues/66962): Added support for parameterized view with analyzer to not analyze create parameterized view. Refactor existing parameterized view logic to not analyze create parameterized view. [#54211](https://github.com/ClickHouse/ClickHouse/pull/54211) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
+* Backported in [#65461](https://github.com/ClickHouse/ClickHouse/issues/65461): Reload certificate chain during certificate reload. [#61671](https://github.com/ClickHouse/ClickHouse/pull/61671) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
+* Backported in [#65880](https://github.com/ClickHouse/ClickHouse/issues/65880): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
+* Backported in [#65912](https://github.com/ClickHouse/ClickHouse/issues/65912): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
+* Backported in [#65281](https://github.com/ClickHouse/ClickHouse/issues/65281): Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#65368](https://github.com/ClickHouse/ClickHouse/issues/65368): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Backported in [#65743](https://github.com/ClickHouse/ClickHouse/issues/65743): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#65351](https://github.com/ClickHouse/ClickHouse/issues/65351): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#66037](https://github.com/ClickHouse/ClickHouse/issues/66037): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#65782](https://github.com/ClickHouse/ClickHouse/issues/65782): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
+* Backported in [#65926](https://github.com/ClickHouse/ClickHouse/issues/65926): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
+* Backported in [#65822](https://github.com/ClickHouse/ClickHouse/issues/65822): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
+* Backported in [#66449](https://github.com/ClickHouse/ClickHouse/issues/66449): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Backported in [#66717](https://github.com/ClickHouse/ClickHouse/issues/66717): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Backported in [#65080](https://github.com/ClickHouse/ClickHouse/issues/65080): Follow up to [#56541](https://github.com/ClickHouse/ClickHouse/issues/56541). [#57141](https://github.com/ClickHouse/ClickHouse/pull/57141) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#65913](https://github.com/ClickHouse/ClickHouse/issues/65913): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#66853](https://github.com/ClickHouse/ClickHouse/issues/66853): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
+
40
docs/changelogs/v24.3.5.46-lts.md
Normal file
@ -0,0 +1,40 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.3.5.46-lts (fe54cead6b6) FIXME as compared to v24.3.4.147-lts (31a7bdc346d)
+
+#### Improvement
+* Backported in [#65463](https://github.com/ClickHouse/ClickHouse/issues/65463): Reload certificate chain during certificate reload. [#61671](https://github.com/ClickHouse/ClickHouse/pull/61671) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
+* Backported in [#65882](https://github.com/ClickHouse/ClickHouse/issues/65882): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
+* Backported in [#65302](https://github.com/ClickHouse/ClickHouse/issues/65302): Returned back the behaviour of how ClickHouse works and interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Backported in [#65892](https://github.com/ClickHouse/ClickHouse/issues/65892): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
+* Backported in [#65283](https://github.com/ClickHouse/ClickHouse/issues/65283): Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#65370](https://github.com/ClickHouse/ClickHouse/issues/65370): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Backported in [#65446](https://github.com/ClickHouse/ClickHouse/issues/65446): Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#65708](https://github.com/ClickHouse/ClickHouse/issues/65708): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#65352](https://github.com/ClickHouse/ClickHouse/issues/65352): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#65327](https://github.com/ClickHouse/ClickHouse/issues/65327): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
+* Backported in [#65538](https://github.com/ClickHouse/ClickHouse/issues/65538): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
+* Backported in [#65576](https://github.com/ClickHouse/ClickHouse/issues/65576): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#65159](https://github.com/ClickHouse/ClickHouse/issues/65159): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)).
+* Backported in [#65615](https://github.com/ClickHouse/ClickHouse/issues/65615): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)).
+* Backported in [#65728](https://github.com/ClickHouse/ClickHouse/issues/65728): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
+* Backported in [#65261](https://github.com/ClickHouse/ClickHouse/issues/65261): Fix the bug in Hashed and Hashed_Array dictionary short circuit evaluation, which may read uninitialized number, leading to various errors. [#65256](https://github.com/ClickHouse/ClickHouse/pull/65256) ([jsc0218](https://github.com/jsc0218)).
+* Backported in [#65667](https://github.com/ClickHouse/ClickHouse/issues/65667): Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#65784](https://github.com/ClickHouse/ClickHouse/issues/65784): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
+* Backported in [#65929](https://github.com/ClickHouse/ClickHouse/issues/65929): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
+* Backported in [#65824](https://github.com/ClickHouse/ClickHouse/issues/65824): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Backported in [#65223](https://github.com/ClickHouse/ClickHouse/issues/65223): Capture weak_ptr of ContextAccess for safety. [#65051](https://github.com/ClickHouse/ClickHouse/pull/65051) ([Alexander Gololobov](https://github.com/davenger)).
+* Backported in [#65901](https://github.com/ClickHouse/ClickHouse/issues/65901): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
+
39
docs/changelogs/v24.3.6.48-lts.md
Normal file
@ -0,0 +1,39 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.3.6.48-lts (b2d33c3c45d) FIXME as compared to v24.3.5.46-lts (fe54cead6b6)
+
+#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
+* Backported in [#66889](https://github.com/ClickHouse/ClickHouse/issues/66889): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#66687](https://github.com/ClickHouse/ClickHouse/issues/66687): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Backported in [#67497](https://github.com/ClickHouse/ClickHouse/issues/67497): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#66324](https://github.com/ClickHouse/ClickHouse/issues/66324): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#66151](https://github.com/ClickHouse/ClickHouse/issues/66151): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
+* Backported in [#66451](https://github.com/ClickHouse/ClickHouse/issues/66451): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Backported in [#66222](https://github.com/ClickHouse/ClickHouse/issues/66222): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#66676](https://github.com/ClickHouse/ClickHouse/issues/66676): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Backported in [#66602](https://github.com/ClickHouse/ClickHouse/issues/66602): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
+* Backported in [#66356](https://github.com/ClickHouse/ClickHouse/issues/66356): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66970](https://github.com/ClickHouse/ClickHouse/issues/66970): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66967](https://github.com/ClickHouse/ClickHouse/issues/66967): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66718](https://github.com/ClickHouse/ClickHouse/issues/66718): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#66949](https://github.com/ClickHouse/ClickHouse/issues/66949): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66946](https://github.com/ClickHouse/ClickHouse/issues/66946): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#67629](https://github.com/ClickHouse/ClickHouse/issues/67629): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
+* Backported in [#67193](https://github.com/ClickHouse/ClickHouse/issues/67193): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Backported in [#67375](https://github.com/ClickHouse/ClickHouse/issues/67375): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#67572](https://github.com/ClickHouse/ClickHouse/issues/67572): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Backported in [#66422](https://github.com/ClickHouse/ClickHouse/issues/66422): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#66855](https://github.com/ClickHouse/ClickHouse/issues/66855): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
+* Backported in [#67055](https://github.com/ClickHouse/ClickHouse/issues/67055): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
+* Backported in [#66943](https://github.com/ClickHouse/ClickHouse/issues/66943): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
+
70
docs/changelogs/v24.4.4.107-stable.md
Normal file
@ -0,0 +1,70 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.4.4.107-stable (af0ed6b197e) FIXME as compared to v24.4.3.25-stable (a915dd4eda4)
+
+#### Improvement
+* Backported in [#65884](https://github.com/ClickHouse/ClickHouse/issues/65884): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
+* Backported in [#65303](https://github.com/ClickHouse/ClickHouse/issues/65303): Returned back the behaviour of how ClickHouse works and interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Backported in [#65894](https://github.com/ClickHouse/ClickHouse/issues/65894): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
+* Backported in [#65372](https://github.com/ClickHouse/ClickHouse/issues/65372): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Backported in [#66883](https://github.com/ClickHouse/ClickHouse/issues/66883): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#65435](https://github.com/ClickHouse/ClickHouse/issues/65435): Forbid `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)).
+* Backported in [#65448](https://github.com/ClickHouse/ClickHouse/issues/65448): Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#65710](https://github.com/ClickHouse/ClickHouse/issues/65710): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#66689](https://github.com/ClickHouse/ClickHouse/issues/66689): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#65353](https://github.com/ClickHouse/ClickHouse/issues/65353): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#65060](https://github.com/ClickHouse/ClickHouse/issues/65060): Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#65329](https://github.com/ClickHouse/ClickHouse/issues/65329): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
+* Backported in [#64833](https://github.com/ClickHouse/ClickHouse/issues/64833): Fix bug which could lead to non-working TTLs with expressions. [#64694](https://github.com/ClickHouse/ClickHouse/pull/64694) ([alesapin](https://github.com/alesapin)).
+* Backported in [#65086](https://github.com/ClickHouse/ClickHouse/issues/65086): Fix removing the `WHERE` and `PREWHERE` expressions, which are always true (for the new analyzer). [#64695](https://github.com/ClickHouse/ClickHouse/pull/64695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#65540](https://github.com/ClickHouse/ClickHouse/issues/65540): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
+* Backported in [#65578](https://github.com/ClickHouse/ClickHouse/issues/65578): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#65161](https://github.com/ClickHouse/ClickHouse/issues/65161): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)).
+* Backported in [#65616](https://github.com/ClickHouse/ClickHouse/issues/65616): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)).
+* Backported in [#65730](https://github.com/ClickHouse/ClickHouse/issues/65730): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
+* Backported in [#65668](https://github.com/ClickHouse/ClickHouse/issues/65668): Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#65786](https://github.com/ClickHouse/ClickHouse/issues/65786): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
+* Backported in [#65810](https://github.com/ClickHouse/ClickHouse/issues/65810): Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#65931](https://github.com/ClickHouse/ClickHouse/issues/65931): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
+* Backported in [#65826](https://github.com/ClickHouse/ClickHouse/issues/65826): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
+* Backported in [#66299](https://github.com/ClickHouse/ClickHouse/issues/66299): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
+* Backported in [#66326](https://github.com/ClickHouse/ClickHouse/issues/66326): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
+* Backported in [#66153](https://github.com/ClickHouse/ClickHouse/issues/66153): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
+* Backported in [#66459](https://github.com/ClickHouse/ClickHouse/issues/66459): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Backported in [#66224](https://github.com/ClickHouse/ClickHouse/issues/66224): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#66267](https://github.com/ClickHouse/ClickHouse/issues/66267): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Backported in [#66678](https://github.com/ClickHouse/ClickHouse/issues/66678): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Backported in [#66603](https://github.com/ClickHouse/ClickHouse/issues/66603): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Backported in [#66358](https://github.com/ClickHouse/ClickHouse/issues/66358): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#66971](https://github.com/ClickHouse/ClickHouse/issues/66971): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#66968](https://github.com/ClickHouse/ClickHouse/issues/66968): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#66719](https://github.com/ClickHouse/ClickHouse/issues/66719): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Backported in [#66950](https://github.com/ClickHouse/ClickHouse/issues/66950): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#66947](https://github.com/ClickHouse/ClickHouse/issues/66947): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#67195](https://github.com/ClickHouse/ClickHouse/issues/67195): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Backported in [#67377](https://github.com/ClickHouse/ClickHouse/issues/67377): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#67240](https://github.com/ClickHouse/ClickHouse/issues/67240): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Backported in [#65410](https://github.com/ClickHouse/ClickHouse/issues/65410): Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Backported in [#65903](https://github.com/ClickHouse/ClickHouse/issues/65903): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Backported in [#66385](https://github.com/ClickHouse/ClickHouse/issues/66385): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Backported in [#66424](https://github.com/ClickHouse/ClickHouse/issues/66424): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#66542](https://github.com/ClickHouse/ClickHouse/issues/66542): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#66857](https://github.com/ClickHouse/ClickHouse/issues/66857): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
|
||||||
|
* Backported in [#66873](https://github.com/ClickHouse/ClickHouse/issues/66873): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Backported in [#67057](https://github.com/ClickHouse/ClickHouse/issues/67057): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Backported in [#66944](https://github.com/ClickHouse/ClickHouse/issues/66944): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Backported in [#67250](https://github.com/ClickHouse/ClickHouse/issues/67250): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Backported in [#67410](https://github.com/ClickHouse/ClickHouse/issues/67410): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
|
||||||
|
|
73
docs/changelogs/v24.4.4.113-stable.md
Normal file
@ -0,0 +1,73 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.4.4.113-stable (d63a54957bd) FIXME as compared to v24.4.3.25-stable (a915dd4eda4)

#### Improvement
* Backported in [#65884](https://github.com/ClickHouse/ClickHouse/issues/65884): Always start Keeper with a sufficient amount of threads in the global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#65303](https://github.com/ClickHouse/ClickHouse/issues/65303): Returned back the previous behaviour of how ClickHouse interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes the newer behaviour available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples` (see the sketch after this list). [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Backported in [#65894](https://github.com/ClickHouse/ClickHouse/issues/65894): Respect the cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
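
A minimal sketch of opting back into the newer Tuple-in-CSV behaviour via the settings named above. This is an illustrative assumption, not taken from the PR: it assumes all three are boolean toggles where `1` enables the post-#60994 behaviour.

```sql
-- Assumption: 1 enables the newer behaviour, 0 keeps the restored default.
SET output_format_csv_serialize_tuple_into_separate_columns = 1;
SET input_format_csv_deserialize_separate_columns_into_tuple = 1;
SET input_format_csv_try_infer_strings_from_quoted_tuples = 1;
```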

#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#65372](https://github.com/ClickHouse/ClickHouse/issues/65372): Fix a bug in ClickHouse Keeper that caused a digest mismatch when closing a session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#66883](https://github.com/ClickHouse/ClickHouse/issues/66883): Fix unexpected size of a low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#65435](https://github.com/ClickHouse/ClickHouse/issues/65435): Forbid the `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#65448](https://github.com/ClickHouse/ClickHouse/issues/65448): Use correct memory alignment for the Distinct combinator. Previously, a crash could happen because of an invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#65710](https://github.com/ClickHouse/ClickHouse/issues/65710): Fix a crash in `maxIntersections`. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66689](https://github.com/ClickHouse/ClickHouse/issues/66689): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#67499](https://github.com/ClickHouse/ClickHouse/issues/67499): Fix a crash in DistributedAsyncInsert when the connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65353](https://github.com/ClickHouse/ClickHouse/issues/65353): Fix a possible abort on an uncaught exception in `~WriteBufferFromFileDescriptor` in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#65060](https://github.com/ClickHouse/ClickHouse/issues/65060): Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65329](https://github.com/ClickHouse/ClickHouse/issues/65329): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
* Backported in [#64833](https://github.com/ClickHouse/ClickHouse/issues/64833): Fix a bug which could lead to non-working TTLs with expressions. [#64694](https://github.com/ClickHouse/ClickHouse/pull/64694) ([alesapin](https://github.com/alesapin)).
* Backported in [#65086](https://github.com/ClickHouse/ClickHouse/issues/65086): Fix removing the `WHERE` and `PREWHERE` expressions, which are always true (for the new analyzer). [#64695](https://github.com/ClickHouse/ClickHouse/pull/64695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65540](https://github.com/ClickHouse/ClickHouse/issues/65540): Fix a crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
* Backported in [#65578](https://github.com/ClickHouse/ClickHouse/issues/65578): Fix a crash on destroying AccessControl: add an explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
* Backported in [#65161](https://github.com/ClickHouse/ClickHouse/issues/65161): Fix pushing arithmetic operations out of aggregation. In the new analyzer, the optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#65616](https://github.com/ClickHouse/ClickHouse/issues/65616): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#65730](https://github.com/ClickHouse/ClickHouse/issues/65730): Eliminate injective functions in the arguments of `uniq*` functions recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#65668](https://github.com/ClickHouse/ClickHouse/issues/65668): Disable the `non-intersecting-parts` optimization for queries with `FINAL` when the `read-in-order` optimization is enabled, since it could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` until this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65786](https://github.com/ClickHouse/ClickHouse/issues/65786): Fixed a bug in MergeJoin. A column in sparse serialization might be treated as a column of its nested type even though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#65810](https://github.com/ClickHouse/ClickHouse/issues/65810): Fix invalid exceptions in function `parseDateTime` with the `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#65931](https://github.com/ClickHouse/ClickHouse/issues/65931): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, the ClickHouse query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#65826](https://github.com/ClickHouse/ClickHouse/issues/65826): Fix a bug in short-circuit logic when the old analyzer and `dictGetOrDefault` are used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#66299](https://github.com/ClickHouse/ClickHouse/issues/66299): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), and fix an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Backported in [#66326](https://github.com/ClickHouse/ClickHouse/issues/66326): Add the missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66153](https://github.com/ClickHouse/ClickHouse/issues/66153): Fixed a buffer overflow bug in the `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66459](https://github.com/ClickHouse/ClickHouse/issues/66459): Fixed a bug in the ZooKeeper client: a session could get stuck in an unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to the "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66224](https://github.com/ClickHouse/ClickHouse/issues/66224): Fix an issue with `SumIfToCountIfVisitor` and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66267](https://github.com/ClickHouse/ClickHouse/issues/66267): Don't throw `TIMEOUT_EXCEEDED` for the `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66678](https://github.com/ClickHouse/ClickHouse/issues/66678): Fix handling of the limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66603](https://github.com/ClickHouse/ClickHouse/issues/66603): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup, which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66358](https://github.com/ClickHouse/ClickHouse/issues/66358): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66971](https://github.com/ClickHouse/ClickHouse/issues/66971): Fix the `Column identifier is already registered` error with `group_by_use_nulls=true` and the new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66968](https://github.com/ClickHouse/ClickHouse/issues/66968): Fix the `Cannot find column` error for queries with a constant expression in the `GROUP BY` key and the new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66719](https://github.com/ClickHouse/ClickHouse/issues/66719): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66950](https://github.com/ClickHouse/ClickHouse/issues/66950): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66947](https://github.com/ClickHouse/ClickHouse/issues/66947): Fix the `Method getResultType is not supported for QUERY query node` error when a scalar subquery was used as the first argument of `IN` (with the new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67631](https://github.com/ClickHouse/ClickHouse/issues/67631): Fix an occasional deadlock in `Context::getDDLWorker`. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67195](https://github.com/ClickHouse/ClickHouse/issues/67195): TRUNCATE DATABASE used to stop replication as if it were a DROP DATABASE query; this is fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67377](https://github.com/ClickHouse/ClickHouse/issues/67377): Fix the error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67240](https://github.com/ClickHouse/ClickHouse/issues/67240): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#67574](https://github.com/ClickHouse/ClickHouse/issues/67574): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#65410](https://github.com/ClickHouse/ClickHouse/issues/65410): Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#65903](https://github.com/ClickHouse/ClickHouse/issues/65903): Fix a bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66385](https://github.com/ClickHouse/ClickHouse/issues/66385): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66424](https://github.com/ClickHouse/ClickHouse/issues/66424): Ignore subqueries for `IN` in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66542](https://github.com/ClickHouse/ClickHouse/issues/66542): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66857](https://github.com/ClickHouse/ClickHouse/issues/66857): Fix a data race in `S3::ClientCache`. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66873](https://github.com/ClickHouse/ClickHouse/issues/66873): Support one more case in `JOIN ON ... IS NULL`. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67057](https://github.com/ClickHouse/ClickHouse/issues/67057): Increase the asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66944](https://github.com/ClickHouse/ClickHouse/issues/66944): A small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67250](https://github.com/ClickHouse/ClickHouse/issues/67250): Followup to [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67410](https://github.com/ClickHouse/ClickHouse/issues/67410): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
524
docs/changelogs/v24.7.1.2915-stable.md
Normal file
@ -0,0 +1,524 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.7.1.2915-stable (a37d2d43da7) FIXME as compared to v24.7.1.1-new (aa023477a92)

#### Backward Incompatible Change
* Change the binary serialization of the Variant data type: add a `compact` mode to avoid writing the same discriminator multiple times for granules with a single variant or with only NULL values. Add the MergeTree setting `use_compact_variant_discriminators_serialization`, which is enabled by default (see the sketch after this list). Note that the Variant type is still experimental, so a backward-incompatible change in serialization is OK. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)).
* Forbid `CREATE MATERIALIZED VIEW ... ENGINE Replicated*MergeTree POPULATE AS SELECT ...` with Replicated databases. [#63963](https://github.com/ClickHouse/ClickHouse/pull/63963) ([vdimir](https://github.com/vdimir)).
* `clickhouse-keeper-client` will only accept paths in string literals, such as `ls '/hello/world'`, not bare strings such as `ls /hello/world`. [#65494](https://github.com/ClickHouse/ClickHouse/pull/65494) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. This fixes a typo reported in [#66179](https://github.com/ClickHouse/ClickHouse/issues/66179). [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove the `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
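
A minimal sketch of toggling the new MergeTree setting from the first entry at table creation time; the table and column names are hypothetical, and the effect of the `0` value is assumed from the entry's description.

```sql
-- The Variant type is still experimental and must be enabled explicitly.
SET allow_experimental_variant_type = 1;

CREATE TABLE events_sketch
(
    id UInt64,
    payload Variant(UInt64, String)
)
ENGINE = MergeTree
ORDER BY id
-- 1 (the default) writes discriminators in the new compact mode;
-- 0 is assumed to keep the previous, non-compact serialization.
SETTINGS use_compact_variant_discriminators_serialization = 0;
```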
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
* Extend function `tuple` to construct named tuples in query. Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* `ASOF JOIN` support for `full_sorting_join` algorithm Close [#54493](https://github.com/ClickHouse/ClickHouse/issues/54493). [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* A new table function, `fuzzQuery,` was added. This function allows you to modify a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1');`. [#62103](https://github.com/ClickHouse/ClickHouse/pull/62103) ([pufit](https://github.com/pufit)).
|
||||||
|
* Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)).
|
||||||
|
* Support JWT authentication in `clickhouse-client`. [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||||
|
* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)).
|
||||||
|
* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)).
|
||||||
|
* Support accept_invalid_certificate in client's config in order to allow for client to connect over secure TCP to a server running with self-signed certificate - can be used as a shorthand for corresponding `openSSL` client settings `verificationMode=none` + `invalidCertificateHandler.name=AcceptCertificateHandler`. [#65238](https://github.com/ClickHouse/ClickHouse/pull/65238) ([peacewalker122](https://github.com/peacewalker122)).
|
||||||
|
* Add system.error_log which contains history of error values from table system.errors, periodically flushed to disk. [#65381](https://github.com/ClickHouse/ClickHouse/pull/65381) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||||
|
* Add aggregate function `groupConcat`. About the same as `arrayStringConcat( groupArray(column), ',')` Can receive 2 parameters: a string delimiter and the number of elements to be processed. [#65451](https://github.com/ClickHouse/ClickHouse/pull/65451) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Add AzureQueue storage. [#65458](https://github.com/ClickHouse/ClickHouse/pull/65458) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Add a new setting to disable/enable writing page index into parquet files. [#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)).
|
||||||
|
* Allow system administrators to configure `logger.console_log_level`. [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Automatically append a wildcard `*` to the end of a directory path with table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)).
|
||||||
|
* Add `--memory-usage` option to client in non interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)).
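
A few minimal sketches of the new features above. These are illustrative assumptions based on the entries, not snippets from the PRs: the table names (`trades`, `quotes`), the file path, the `groupConcat` parameter order, and the `full_sorting_merge` value for `join_algorithm` are all assumptions.

```sql
-- Named tuples and tupleNames (usage assumed from the entry above).
SELECT tuple(1 AS a, 'x' AS b) AS t, tupleNames(t);

-- percent_rank as a window function (standard SQL semantics assumed).
SELECT number, percent_rank() OVER (ORDER BY number) AS pr
FROM numbers(5);

-- groupConcat with a string delimiter and an element limit (order assumed).
SELECT groupConcat(', ', 3)(toString(number))
FROM numbers(10);

-- ASOF JOIN executed with the full sorting merge algorithm.
SET join_algorithm = 'full_sorting_merge';
SELECT *
FROM trades AS t
ASOF JOIN quotes AS q ON t.symbol = q.symbol AND t.ts >= q.ts;

-- A directory path given to the file table function is assumed to be
-- treated as 'data/*' after this change.
SELECT count() FROM file('data/', 'Parquet');
```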

#### Performance Improvement
* Enable `optimize_functions_to_subcolumns` by default. [#58661](https://github.com/ClickHouse/ClickHouse/pull/58661) ([Anton Popov](https://github.com/CurtizJ)).
* Replace the integer-to-string conversion algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)).
* Sizes of hash tables created by joins (the `parallel_hash` algorithm) are now collected and cached. This information is used to preallocate space in hash tables for subsequent query executions and saves time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)).
* Optimized queries with `ORDER BY` on the primary key and a high-selectivity `WHERE` condition by using buffering. It is controlled by the setting `read_in_order_use_buffering` (enabled by default) and can increase the memory usage of the query (see the sketch after this list). [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)).
* Improve performance of loading `plain_rewritable` metadata. [#65634](https://github.com/ClickHouse/ClickHouse/pull/65634) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Attaching tables on read-only disks will use fewer resources by not loading outdated parts. [#65635](https://github.com/ClickHouse/ClickHouse/pull/65635) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support minmax hyperrectangles for Set indices. [#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)).
* Unload the primary index of outdated parts to reduce total memory usage. [#65852](https://github.com/ClickHouse/ClickHouse/pull/65852) ([Anton Popov](https://github.com/CurtizJ)).
* Functions `replaceRegexpAll` and `replaceRegexpOne` are now significantly faster if the pattern is trivial, i.e. contains no metacharacters, pattern classes, flags, grouping characters, etc. (Thanks to Taiyang Li). [#66185](https://github.com/ClickHouse/ClickHouse/pull/66185) ([Robert Schulze](https://github.com/rschu1ze)).
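
A minimal sketch of toggling the buffering optimization from the entry above; the table and condition are hypothetical, and only the setting name comes from the entry itself.

```sql
-- Assumes a MergeTree table `hits` ordered by (id).
SELECT *
FROM hits
WHERE user_id = 42                          -- high-selectivity condition
ORDER BY id                                 -- primary key order
LIMIT 100
SETTINGS read_in_order_use_buffering = 0;   -- opt out if query memory usage grows too much
```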

#### Improvement
* Support RocksDB as a backend storage for Keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)).
* The setting `optimize_trivial_insert_select` is disabled by default. In most cases, this should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back, or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Print a stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)).
* The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)).
* Change how deduplication for materialized views works. This fixes a lot of cases, such as: on the destination table, data split into two or more blocks being considered a duplicate when those blocks are inserted in parallel; on the MV destination table, equal blocks being deduplicated when the MV produces equal data for different input data due to aggregation; on the MV destination table, equal blocks coming from different MVs being deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
* Allow matching column names in a case-insensitive manner when reading JSON files (`input_format_json_case_insensitive_column_matching`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)).
* Support reading partitioned DeltaLake data. Infer the DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)).
* In composable protocols, the TLS layer previously accepted only the `certificateFile` and `privateKeyFile` parameters. See https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)).
* Make an interactive client for `clickhouse-disks`; add a local disk from the local directory. Fixes [#56791](https://github.com/ClickHouse/ClickHouse/issues/56791). [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)).
* Added the profile event `SelectQueriesWithPrimaryKeyUsage`, which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)).
* `StorageS3Queue`-related fixes and improvements. Deduce a default value of `s3queue_processing_threads_num` according to the number of physical CPU cores on the server (instead of the previous default value of 1). Set the default value of `s3queue_loading_retries` to 10. Fix a possible vague "Uncaught exception" in the exception column of `system.s3queue`. Do not increment the retry count on `MEMORY_LIMIT_EXCEEDED` exceptions. Move the files commit to a stage after the insertion into the table has fully finished, to avoid files being committed while not yet inserted. Add the settings `s3queue_max_processed_files_before_commit`, `s3queue_max_processed_rows_before_commit`, `s3queue_max_processed_bytes_before_commit`, and `s3queue_max_processing_time_sec_before_commit` to better control commit and flush time. [#65046](https://github.com/ClickHouse/ClickHouse/pull/65046) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed broken multiple-column aggregation on s390x. [#65062](https://github.com/ClickHouse/ClickHouse/pull/65062) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Support aliases in the parametrized view function (only the new analyzer). [#65190](https://github.com/ClickHouse/ClickHouse/pull/65190) ([Kseniia Sumarokova](https://github.com/kssenii)).
* S3: reduce retry time for queries and increase the retry count for backups: 8.5 minutes and 100 retries for queries, 1.2 hours and 1000 retries for backup restore. [#65232](https://github.com/ClickHouse/ClickHouse/pull/65232) ([Sema Checherinda](https://github.com/CheSema)).
* Updated azureBlobStorage to mask the account key in logs. [#65273](https://github.com/ClickHouse/ClickHouse/pull/65273) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Partition pruning for `IN` predicates when the filter expression is a part of the `PARTITION BY` expression (see the sketches after this list). [#65335](https://github.com/ClickHouse/ClickHouse/pull/65335) ([Eduard Karacharov](https://github.com/korowa)).
* Add system tables with the main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)).
* Add support for `cluster_for_parallel_replicas` when using custom-key parallel replicas. It allows you to use parallel replicas with a custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)).
* Support the query plan LIMIT optimization. Support LIMIT pushdown for the PostgreSQL storage and table function. [#65454](https://github.com/ClickHouse/ClickHouse/pull/65454) ([Maksim Kita](https://github.com/kitaisreal)).
* `arrayMin`/`arrayMax` are now applicable to all data types that are comparable. [#65455](https://github.com/ClickHouse/ClickHouse/pull/65455) ([pn](https://github.com/chloro-pn)).
* Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)).
* Do not create format settings for each row when serializing chunks to insert into an EmbeddedRocksDB table. [#65474](https://github.com/ClickHouse/ClickHouse/pull/65474) ([Duc Canh Le](https://github.com/canhld94)).
* Fixed an out-of-range exception in parsing DWARF5 on s390x. [#65501](https://github.com/ClickHouse/ClickHouse/pull/65501) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Reduce the `clickhouse-local` prompt to just `:)`. `getFQDNOrHostName()` takes too long on macOS, and we don't want a hostname in the prompt for `clickhouse-local` anyway. [#65510](https://github.com/ClickHouse/ClickHouse/pull/65510) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Avoid printing a message from jemalloc about per-CPU arenas on low-end virtual machines. [#65532](https://github.com/ClickHouse/ClickHouse/pull/65532) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable filesystem cache background download by default. It will be enabled back when we fix the issue with a possible "Memory limit exceeded" error, which happens because memory deallocation is done outside of the query context (while the buffer is allocated inside the query context) if we use background download threads. Plus, we need to add a separate setting to define the max size to download for background workers (currently it is limited by `max_file_segment_size`, which might be too big). [#65534](https://github.com/ClickHouse/ClickHouse/pull/65534) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a new config option `<config_reload_interval_ms>`, which allows specifying how often ClickHouse reloads its config. [#65545](https://github.com/ClickHouse/ClickHouse/pull/65545) ([alesapin](https://github.com/alesapin)).
* Implement binary encoding for ClickHouse data types and add its specification to the docs. Use it in Dynamic binary serialization, and allow using it in the RowBinaryWithNamesAndTypes and Native formats under settings. [#65546](https://github.com/ClickHouse/ClickHouse/pull/65546) ([Kruglov Pavel](https://github.com/Avogar)).
* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available, despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
* The server settings `compiled_expression_cache_size` and `compiled_expression_cache_elements_size` are now shown in `system.server_settings`. [#65584](https://github.com/ClickHouse/ClickHouse/pull/65584) ([Robert Schulze](https://github.com/rschu1ze)).
* When a lightweight delete happens on a table with projection(s), users can choose to either throw an exception (by default) or drop the projection so that the lightweight delete can proceed (see the sketches after this list). [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)).
* Add support for user identification based on the x509 SubjectAltName extension. [#65626](https://github.com/ClickHouse/ClickHouse/pull/65626) ([Anton Kozlov](https://github.com/tonickkozlov)).
* `clickhouse-local` will respect the `max_server_memory_usage` and `max_server_memory_usage_to_ram_ratio` from the configuration file. It will also set the max memory usage to 90% of the system memory by default, like `clickhouse-server` does. This closes [#65695](https://github.com/ClickHouse/ClickHouse/issues/65695). [#65697](https://github.com/ClickHouse/ClickHouse/pull/65697) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a script to back up your files to ClickHouse. This is strange, but it works. [#65699](https://github.com/ClickHouse/ClickHouse/pull/65699) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The PostgreSQL source now supports query cancellation. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)).
* Make `allow_experimental_analyzer` be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed-version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Respect the cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
* Allow using the `concat` function with empty arguments, e.g. `SELECT concat();`. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([李扬](https://github.com/taiyang-li)).
* Allow controlling named collections in `clickhouse-local`. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve Azure profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)).
* `Query was cancelled` might have been printed twice in clickhouse-client. This behaviour is fixed. [#66005](https://github.com/ClickHouse/ClickHouse/pull/66005) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Support reading ORC files using the writer's time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)).
* Refactor the JSONExtract functions and support more types, including the experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)).
* DatabaseCatalog drops tables faster by using up to `database_catalog_drop_table_concurrency` threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)).
* Change how deduplication for materialized views works (the same set of fixes as in [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) above). [#66144](https://github.com/ClickHouse/ClickHouse/pull/66144) ([Sema Checherinda](https://github.com/CheSema)).
* Support the null map subcolumn for Variant and Dynamic subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)).
* Add settings to control the connection to PostgreSQL. The setting `postgresql_connection_attempt_timeout` specifies the value passed to the `connect_timeout` parameter of the connection URL. The setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL endpoint. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)).
* Reduce inaccuracy of `input_wait_elapsed_us`/`input_wait_elapsed_us`/`elapsed_us`. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)).
* Improve FilesystemCache ProfileEvents. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)).
* Add settings to ignore the ON CLUSTER clause in queries for named collection management with replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Upgraded the `pocketfft` dependency to the recent commit https://github.com/mreineck/pocketfft/commit/f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3. [#66291](https://github.com/ClickHouse/ClickHouse/pull/66291) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Upgraded `azure-sdk-for-cpp` to the recent commit https://github.com/ClickHouse/azure-sdk-for-cpp/commit/ea3e19a7be08519134c643177d56c7484dfec884. [#66292](https://github.com/ClickHouse/ClickHouse/pull/66292) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Function `generateSnowflakeID` now allows specifying a machine ID as a parameter to prevent collisions in large clusters. [#66374](https://github.com/ClickHouse/ClickHouse/pull/66374) ([ZAWA_ll](https://github.com/Zawa-ll)).
* Disable suspending on Ctrl+Z in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add an option for validating the primary key type in Dictionaries. Without this option, for simple layouts, any column type will be implicitly converted to UInt64. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix SHOW MERGES remaining time. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
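
Two minimal sketches for the partition-pruning and lightweight-delete entries above. The table layout is hypothetical, and `lightweight_mutation_projection_mode` is an assumed name for the setting the projection entry describes.

```sql
-- The filter column `region` is part of the PARTITION BY expression,
-- so partitions that don't match the IN predicate should be pruned.
CREATE TABLE sales
(
    dt Date,
    region String,
    amount UInt64,
    PROJECTION by_region
    (
        SELECT region, sum(amount) GROUP BY region
    )
)
ENGINE = MergeTree
PARTITION BY (toYYYYMM(dt), region)
ORDER BY dt;

SELECT sum(amount) FROM sales WHERE region IN ('eu', 'us');

-- A lightweight delete on a table with a projection throws by default;
-- the setting name below is an assumption based on the changelog entry.
DELETE FROM sales WHERE amount = 0
SETTINGS lightweight_mutation_projection_mode = 'drop';
```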
|
||||||
|
|
||||||
|
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
|
||||||
|
* Fix unexpeced size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously such cyclic dependencies could lead to a deadlock during server startup. Closes [#65355](https://github.com/ClickHouse/ClickHouse/issues/65355). Also fix some bugs in dependencies creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
* Fixed crash while using MaterializedMySQL with TABLE OVERRIDE that maps MySQL NULL field into ClickHouse not NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)).
|
||||||
|
* Fix logical error when PREWHERE expression read no columns and table has no adaptive index granularity (very old table). Fix [#56640](https://github.com/ClickHouse/ClickHouse/issues/56640). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Fix bug with cancelation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Fix filling parts columns from metadata (when columns.txt does not exists). [#64757](https://github.com/ClickHouse/ClickHouse/pull/64757) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix AST formatting of 'create table b empty as a'. [#64951](https://github.com/ClickHouse/ClickHouse/pull/64951) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
|
||||||
|
* Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Fix unexpected projection name when query with CTE. [#65267](https://github.com/ClickHouse/ClickHouse/pull/65267) ([wudidapaopao](https://github.com/wudidapaopao)).
|
||||||
|
* Require `dictGet` privilege when accessing dictionaries via direct query or the `Dictionary` table engine. [#65359](https://github.com/ClickHouse/ClickHouse/pull/65359) ([Joe Lynch](https://github.com/joelynch)).
|
||||||
|
* Fix user-specific S3 auth with incremental backups. [#65481](https://github.com/ClickHouse/ClickHouse/pull/65481) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix getting exception `Index out of bound for blob metadata` in case all files from list batch were filtered out. [#65523](https://github.com/ClickHouse/ClickHouse/pull/65523) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix NOT_FOUND_COLUMN_IN_BLOCK for deduplicate merge of projection. [#65573](https://github.com/ClickHouse/ClickHouse/pull/65573) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Fixed a bug that compatibility level '23.4' was not properly applied. [#65737](https://github.com/ClickHouse/ClickHouse/pull/65737) ([cw5121](https://github.com/cw5121)).
|
||||||
|
* Fix odbc table with nullable fields. [#65738](https://github.com/ClickHouse/ClickHouse/pull/65738) ([Rodolphe Dugé de Bernonville](https://github.com/RodolpheDuge)).
|
||||||
|
* Fix data race in `TCPHandler`, which could happen on fatal error. [#65744](https://github.com/ClickHouse/ClickHouse/pull/65744) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix a bug in short-circuit evaluation when the old analyzer and `dictGetOrDefault` are used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
* Fix a bug that led `EmbeddedRocksDB` tables with TTL to write corrupted SST files. [#65816](https://github.com/ClickHouse/ClickHouse/pull/65816) ([Duc Canh Le](https://github.com/canhld94)).
* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out-of-bounds (issue [#65517](https://github.com/ClickHouse/ClickHouse/issues/65517)). [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)).
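  A rough illustration of the new behavior:

  ```sql
  SELECT bitTest(toUInt8(1), 0);   -- returns 1: bit 0 of 0b00000001 is set
  SELECT bitTest(toUInt8(1), 100); -- now an error: index 100 is out of bounds for an 8-bit value
  ```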
* Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)).
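  A minimal sketch (hypothetical tables `t1` and `t2`):

  ```sql
  SET join_algorithm = 'hash', join_any_take_last_row = 1;
  -- For ANY JOIN, keep the last matching right-side row instead of the first one
  SELECT * FROM t1 ANY LEFT JOIN t2 ON t1.k = t2.k;
  ```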
* Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`); also fix an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
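  A minimal sketch of the rewrite (hypothetical tables with Nullable join keys):

  ```sql
  SELECT *
  FROM t1 JOIN t2
    ON (t1.a = t2.b AND (t1.a IS NOT NULL) AND (t2.b IS NOT NULL))
    OR ((t1.a IS NULL) AND (t2.b IS NULL));
  -- is now treated as equivalent to the null-safe comparison:
  -- SELECT * FROM t1 JOIN t2 ON t1.a <=> t2.b;
  ```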
* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions (issue [#65516](https://github.com/ClickHouse/ClickHouse/issues/65516)). [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
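  A rough illustration, assuming an 8-bit argument:

  ```sql
  SELECT bitShiftLeft(toUInt8(1), 7); -- 128, still within the 8-bit width
  SELECT bitShiftLeft(toUInt8(1), 9); -- now an error instead of an arbitrary result
  ```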
* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)).
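  A quick way to check the new tie handling (the values are illustrative):

  ```sql
  -- The score 0.4 is tied between both classes; the result should now match
  -- sklearn.metrics.roc_auc_score on the same data
  SELECT arrayAUC([0.1, 0.4, 0.4, 0.8], [0, 0, 1, 1]);
  ```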
* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible issues with MySQL client protocol TLS connections. [#65938](https://github.com/ClickHouse/ClickHouse/pull/65938) ([Azat Khuzhin](https://github.com/azat)).
* Fix handling of `SSL_ERROR_WANT_READ`/`SSL_ERROR_WANT_WRITE` with zero timeout. [#65941](https://github.com/ClickHouse/ClickHouse/pull/65941) ([Azat Khuzhin](https://github.com/azat)).
* Add the missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema; this prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
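  A minimal sketch of why this matters (hypothetical file name):

  ```sql
  DESC file('data.csv');                      -- schema inferred and cached
  SET input_format_csv_skip_first_lines = 1;  -- changes what the inferred schema should be
  DESC file('data.csv');                      -- previously could return the stale cached schema
  ```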
* The `_size` virtual column in the `s3` engine and `s3` table function now denotes the size of a file inside an archive, not the size of the archive itself. [#65993](https://github.com/ClickHouse/ClickHouse/pull/65993) ([Daniil Ivanik](https://github.com/divanik)).
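  For example (hypothetical bucket and archive, assuming the `::` archive path syntax):

  ```sql
  SELECT _file, _size -- _size: the size of data.csv inside archive.zip, not of the archive
  FROM s3('https://my-bucket.s3.amazonaws.com/archive.zip :: data.csv', 'CSVWithNames');
  ```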
* Fix resolving dynamic subcolumns in analyzer, avoid reading the whole column on dynamic subcolumn reading. [#66004](https://github.com/ClickHouse/ClickHouse/pull/66004) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix config merging for from_env with replace overrides. [#66034](https://github.com/ClickHouse/ClickHouse/pull/66034) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible hanging in `GRPCServer` during shutdown. This PR fixes [#65622](https://github.com/ClickHouse/ClickHouse/issues/65622). [#66061](https://github.com/ClickHouse/ClickHouse/pull/66061) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix reading dynamic subcolumns from an altered `Memory` table. Previously, if the `max_types` parameter of a `Dynamic` type was changed in a `Memory` table via ALTER, subsequent subcolumn reads could return a wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixed several cases in function `has` with non-constant `LowCardinality` arguments. [#66088](https://github.com/ClickHouse/ClickHouse/pull/66088) ([Anton Popov](https://github.com/CurtizJ)).
* Fix for `groupArrayIntersect`. It had incorrect behavior in the `merge()` function. Also, fixed behavior in `deserialise()` for numeric and general data. [#66103](https://github.com/ClickHouse/ClickHouse/pull/66103) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if optimization merges two filter expressions and does not apply a short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed the issue when the server failed to parse Avro files with negative block size arrays encoded, which is now allowed by the Avro specification. [#66130](https://github.com/ClickHouse/ClickHouse/pull/66130) ([Serge Klochkov](https://github.com/slvrtrn)).
* Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Fix rare case with missing data in the result of distributed query, close [#61432](https://github.com/ClickHouse/ClickHouse/issues/61432). [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
* Fix order of parsing metadata fields in StorageDeltaLake. [#66211](https://github.com/ClickHouse/ClickHouse/pull/66211) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
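  A minimal sketch (hypothetical cluster and table names):

  ```sql
  SET distributed_ddl_output_mode = 'none_only_active';
  -- Waits only for currently active replicas; inactive ones no longer cause TIMEOUT_EXCEEDED
  CREATE TABLE db.t ON CLUSTER my_cluster (x UInt64) ENGINE = MergeTree ORDER BY x;
  ```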
* Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Fix error reporting while copying to S3 or AzureBlobStorage. [#66295](https://github.com/ClickHouse/ClickHouse/pull/66295) ([Vitaly Baranov](https://github.com/vitlibar)).
* Prevent watchdog from keeping descriptors of unlinked(rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Fix a bug where `LogicalExpressionOptimizerPass` lost the logical type of a constant. Closes [#64487](https://github.com/ClickHouse/ClickHouse/issues/64487). [#66344](https://github.com/ClickHouse/ClickHouse/pull/66344) ([pn](https://github.com/chloro-pn)).
* Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible incorrect results for queries that join and filter a table with an external engine (like PostgreSQL), caused by overly aggressive filter pushdown. From now on, conditions from the WHERE section won't be sent to the external database in case of an outer join with an external table. [#66402](https://github.com/ClickHouse/ClickHouse/pull/66402) ([vdimir](https://github.com/vdimir)).
* Added missing column materialization for cross join. [#66413](https://github.com/ClickHouse/ClickHouse/pull/66413) ([lgbo](https://github.com/lgbo-ustc)).
* Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Avoid a possible logical error during import from the Npy format in case of a bad array nesting level; fix testing of other kinds of errors. [#66461](https://github.com/ClickHouse/ClickHouse/pull/66461) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix a wrong `count()` result when there is a non-deterministic function in the predicate. [#66510](https://github.com/ClickHouse/ClickHouse/pull/66510) ([Duc Canh Le](https://github.com/canhld94)).
* Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix reading of uninitialized memory when hashing empty tuples. This closes [#66559](https://github.com/ClickHouse/ClickHouse/issues/66559). [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix logical error in `PrometheusRequestHandler`. [#66621](https://github.com/ClickHouse/ClickHouse/pull/66621) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix `column_length` not being updated in `ColumnTuple::insertManyFrom`. [#66626](https://github.com/ClickHouse/ClickHouse/pull/66626) ([lgbo](https://github.com/lgbo-ustc)).
* Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), only with the analyzer disabled. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix rare case of stuck merge after drop column. [#66707](https://github.com/ClickHouse/ClickHouse/pull/66707) ([Raúl Marín](https://github.com/Algunenano)).
* Fix the `isUniqTypes` assertion when doing INSERT SELECT from remote sources. [#66722](https://github.com/ClickHouse/ClickHouse/pull/66722) ([Sema Checherinda](https://github.com/CheSema)).
* Backported in [#67026](https://github.com/ClickHouse/ClickHouse/issues/67026): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#67443](https://github.com/ClickHouse/ClickHouse/issues/67443): Forbid CREATE AS SELECT even when `database_replicated_allow_heavy_create` is set. It was unconditionally forbidden in 23.12 and accidentally allowed under the setting in unreleased 24.7. [#66980](https://github.com/ClickHouse/ClickHouse/pull/66980) ([vdimir](https://github.com/vdimir)).
* Backported in [#67201](https://github.com/ClickHouse/ClickHouse/issues/67201): TRUNCATE DATABASE used to stop replication as if it were a DROP DATABASE query; it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67383](https://github.com/ClickHouse/ClickHouse/issues/67383): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67246](https://github.com/ClickHouse/ClickHouse/issues/67246): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
#### Build/Testing/Packaging Improvement
* Instantiate template methods ahead in different .cpp files, avoid too large translation units during compiling. [#64818](https://github.com/ClickHouse/ClickHouse/pull/64818) ([lgbo](https://github.com/lgbo-ustc)).
* Stateless tests: Improve tests speed and decrease number of parallel jobs. [#65186](https://github.com/ClickHouse/ClickHouse/pull/65186) ([Nikita Fomichev](https://github.com/fm4v)).
* Add tests for `base64URLEncode` and `base64URLDecode`. Add analyzer tests. [#65979](https://github.com/ClickHouse/ClickHouse/pull/65979) ([Nikita Fomichev](https://github.com/fm4v)).
* Fix a problem where GitHub terminates instances by timeout, so artifacts are not collected and the full test report is not generated. [#66036](https://github.com/ClickHouse/ClickHouse/pull/66036) ([Nikita Fomichev](https://github.com/fm4v)).
* Fix test [test_grpc_protocol/test.py::test_progress](https://s3.amazonaws.com/clickhouse-test-reports/57695/188f8a3df74caf830ad1ced3c4cf6dfb0aa90093/integration_tests__asan__old_analyzer__[4_6].html). [#66063](https://github.com/ClickHouse/ClickHouse/pull/66063) ([Vitaly Baranov](https://github.com/vitlibar)).
* Stateless tests: Improve tests speed and decrease number of parallel jobs. [#66305](https://github.com/ClickHouse/ClickHouse/pull/66305) ([Nikita Fomichev](https://github.com/fm4v)).
* Stateless tests: Improve tests speed and decrease number of parallel jobs, part 3. [#66363](https://github.com/ClickHouse/ClickHouse/pull/66363) ([Nikita Fomichev](https://github.com/fm4v)).
* Tests: fix tests hanging in cases when gdb catches an error. [#66411](https://github.com/ClickHouse/ClickHouse/pull/66411) ([Nikita Fomichev](https://github.com/fm4v)).
* Fix a build failure present since [Release v24.6.1.4423-stable](https://github.com/ClickHouse/ClickHouse/releases/tag/v24.6.1.4423-stable): building on ppc64le with a dynamic OpenSSL build (`cmake -DENABLE_OPENSSL_DYNAMIC=1 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-ppc64le.cmake`) failed with `ld.lld: error: duplicate symbol: OPENSSL_cleanse`. [#66733](https://github.com/ClickHouse/ClickHouse/pull/66733) ([Yong Wang](https://github.com/kashwy)).
#### NO CL CATEGORY
* Backported in [#67084](https://github.com/ClickHouse/ClickHouse/issues/67084):. [#67040](https://github.com/ClickHouse/ClickHouse/pull/67040) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#67452](https://github.com/ClickHouse/ClickHouse/issues/67452):. [#67392](https://github.com/ClickHouse/ClickHouse/pull/67392) ([alesapin](https://github.com/alesapin)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Revert "Small fix for 02340_parts_refcnt_mergetree""'. [#65155](https://github.com/ClickHouse/ClickHouse/pull/65155) ([Nikita Taranov](https://github.com/nickitat)).
* NO CL ENTRY: 'Revert "Use 1MB HTTP buffers to avoid frequent send syscalls"'. [#65498](https://github.com/ClickHouse/ClickHouse/pull/65498) ([Sergei Trifonov](https://github.com/serxa)).
* NO CL ENTRY: 'Revert "Resubmit http_external_tables_memory_tracking test"'. [#65500](https://github.com/ClickHouse/ClickHouse/pull/65500) ([Nikita Taranov](https://github.com/nickitat)).
* NO CL ENTRY: 'Revert "Add an assertion in ReplicatedMergeTreeQueue"'. [#65686](https://github.com/ClickHouse/ClickHouse/pull/65686) ([Raúl Marín](https://github.com/Algunenano)).
* NO CL ENTRY: 'Revert "insertion deduplication on retries for materialised views"'. [#66134](https://github.com/ClickHouse/ClickHouse/pull/66134) ([Sema Checherinda](https://github.com/CheSema)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Return and fix 01600_parts_states_metrics_long test. [#58748](https://github.com/ClickHouse/ClickHouse/pull/58748) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Add azure_cache as storage policy to tests. [#59943](https://github.com/ClickHouse/ClickHouse/pull/59943) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Minor: replaced expression with LEGACY_MAX_LEVEL. [#61268](https://github.com/ClickHouse/ClickHouse/pull/61268) ([Vasily Nemkov](https://github.com/Enmk)).
* Make write to temporary data in cache do all checks and assertions as during write to ordinary cache. [#63348](https://github.com/ClickHouse/ClickHouse/pull/63348) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Refactoring near azure blob storage. [#63636](https://github.com/ClickHouse/ClickHouse/pull/63636) ([Anton Popov](https://github.com/CurtizJ)).
* Everything should work with Analyzer. [#63643](https://github.com/ClickHouse/ClickHouse/pull/63643) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add some logging for unused detached tables. [#64992](https://github.com/ClickHouse/ClickHouse/pull/64992) ([Konstantin Morozov](https://github.com/k-morozov)).
* Remove dag flags. [#65234](https://github.com/ClickHouse/ClickHouse/pull/65234) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix flaky autocompletion test. [#65246](https://github.com/ClickHouse/ClickHouse/pull/65246) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Disable userspace page cache by default. [#65305](https://github.com/ClickHouse/ClickHouse/pull/65305) ([Michael Kolupaev](https://github.com/al13n321)).
* Update version_date.tsv and changelogs after v24.4.3.25-stable. [#65308](https://github.com/ClickHouse/ClickHouse/pull/65308) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Do not raise a NOT_IMPLEMENTED error when getting s3 metrics with a multiple disk configuration. [#65403](https://github.com/ClickHouse/ClickHouse/pull/65403) ([Elena Torró](https://github.com/elenatorro)).
* Dodging reading from wrong table with parallel replicas. [#65417](https://github.com/ClickHouse/ClickHouse/pull/65417) ([Nikita Taranov](https://github.com/nickitat)).
* Fix: return error if can't connect to any replicas chosen for query execution. [#65467](https://github.com/ClickHouse/ClickHouse/pull/65467) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix `AzureObjectStorage::exists` method. [#65471](https://github.com/ClickHouse/ClickHouse/pull/65471) ([Anton Popov](https://github.com/CurtizJ)).
* Update version after release. [#65483](https://github.com/ClickHouse/ClickHouse/pull/65483) ([Raúl Marín](https://github.com/Algunenano)).
* Generate 24.6 changelog. [#65485](https://github.com/ClickHouse/ClickHouse/pull/65485) ([Raúl Marín](https://github.com/Algunenano)).
* Fix of `PlanSquashingTransform`: pipeline stuck. [#65487](https://github.com/ClickHouse/ClickHouse/pull/65487) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix bad test `02922_deduplication_with_zero_copy`. [#65492](https://github.com/ClickHouse/ClickHouse/pull/65492) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable clang-format in special areas. [#65495](https://github.com/ClickHouse/ClickHouse/pull/65495) ([Nikita Taranov](https://github.com/nickitat)).
* Fix `test_keeper_snapshots`. [#65497](https://github.com/ClickHouse/ClickHouse/pull/65497) ([Antonio Andelic](https://github.com/antonio2368)).
* Update to libunwind 8.1.7. [#65509](https://github.com/ClickHouse/ClickHouse/pull/65509) ([Michael Kolupaev](https://github.com/al13n321)).
* Setting `uniform_snowflake_conversion_functions` (not in any release yet) was replaced by setting `allow_deprecated_snowflake_conversion_functions`. The latter controls if the legacy snowflake conversion functions are available (by default, they are not). [#65522](https://github.com/ClickHouse/ClickHouse/pull/65522) ([Robert Schulze](https://github.com/rschu1ze)).
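  A minimal sketch of opting back in (the ID value is illustrative):

  ```sql
  SET allow_deprecated_snowflake_conversion_functions = 1;
  SELECT snowflakeToDateTime(1426860702823350272); -- legacy function, unavailable by default
  ```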
* Try CI without RerunCheck; jobs can easily be rerun manually, though an extra amount of CI work will follow on workflow restart. [#65524](https://github.com/ClickHouse/ClickHouse/pull/65524) ([Max K.](https://github.com/maxknv)).
* Bump re2 to latest HEAD. [#65526](https://github.com/ClickHouse/ClickHouse/pull/65526) ([Robert Schulze](https://github.com/rschu1ze)).
* OpenSSL: Replace temporary fix for unsynchronized access by official fix. [#65529](https://github.com/ClickHouse/ClickHouse/pull/65529) ([Robert Schulze](https://github.com/rschu1ze)).
* Update README.md. [#65531](https://github.com/ClickHouse/ClickHouse/pull/65531) ([Tyler Hannan](https://github.com/tylerhannan)).
* CI: sometimes there are timeouts on DROP TABLE for random tests. [#65535](https://github.com/ClickHouse/ClickHouse/pull/65535) ([Sema Checherinda](https://github.com/CheSema)).
* Synchronize `MARK_CACHE_SIZE` value in default settings and config. [#65547](https://github.com/ClickHouse/ClickHouse/pull/65547) ([Denny Crane](https://github.com/den-crane)).
* CI: Skip removed test files in stateless flaky check job. [#65553](https://github.com/ClickHouse/ClickHouse/pull/65553) ([Max K.](https://github.com/maxknv)).
* Renames Build report jobs. [#65554](https://github.com/ClickHouse/ClickHouse/pull/65554) ([Max K.](https://github.com/maxknv)).
* Parse user from URL for dashboard.html (useful for sharing). [#65556](https://github.com/ClickHouse/ClickHouse/pull/65556) ([Azat Khuzhin](https://github.com/azat)).
* Remove tech debt. [#65561](https://github.com/ClickHouse/ClickHouse/pull/65561) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Maybe fix test `00763_lock_buffer_long.sh`. [#65562](https://github.com/ClickHouse/ClickHouse/pull/65562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix clickhouse-keeper with not system-wide directories and provide override for local development. [#65563](https://github.com/ClickHouse/ClickHouse/pull/65563) ([Azat Khuzhin](https://github.com/azat)).
* Re-configure yamllint to allow document-start. [#65565](https://github.com/ClickHouse/ClickHouse/pull/65565) ([Azat Khuzhin](https://github.com/azat)).
* Fix flaky test `01254_dict_load_after_detach_attach.sql`. [#65571](https://github.com/ClickHouse/ClickHouse/pull/65571) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve flaky test to provide more diagnostics. [#65586](https://github.com/ClickHouse/ClickHouse/pull/65586) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test_parallel_replicas_distributed_skip_shards flakiness. [#65588](https://github.com/ClickHouse/ClickHouse/pull/65588) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix an error in the test about metadata_type. [#65592](https://github.com/ClickHouse/ClickHouse/pull/65592) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix slow test. [#65593](https://github.com/ClickHouse/ClickHouse/pull/65593) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* fix flaky 02864_statistics_uniq. [#65599](https://github.com/ClickHouse/ClickHouse/pull/65599) ([Han Fei](https://github.com/hanfei1991)).
* Fix 03172_error_log_table_not_empty. [#65604](https://github.com/ClickHouse/ClickHouse/pull/65604) ([Pablo Marcos](https://github.com/pamarcos)).
* Enable realtime digest for Jepsen tests. [#65608](https://github.com/ClickHouse/ClickHouse/pull/65608) ([Antonio Andelic](https://github.com/antonio2368)).
* CI: Return Job Rerun check. [#65613](https://github.com/ClickHouse/ClickHouse/pull/65613) ([Max K.](https://github.com/maxknv)).
* Update CHANGELOG.md. [#65624](https://github.com/ClickHouse/ClickHouse/pull/65624) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Module is required for authenticating in GH (in cloud). [#65628](https://github.com/ClickHouse/ClickHouse/pull/65628) ([Max K.](https://github.com/maxknv)).
* Update IObjectStorage.h. [#65631](https://github.com/ClickHouse/ClickHouse/pull/65631) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix 02834_apache_arrow_abort flakiness with MSAN. [#65640](https://github.com/ClickHouse/ClickHouse/pull/65640) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix overflow in StorageWindowView. [#65641](https://github.com/ClickHouse/ClickHouse/pull/65641) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix inconsistent AST formatting when a keyword is used as type name. [#65648](https://github.com/ClickHouse/ClickHouse/pull/65648) ([Michael Kolupaev](https://github.com/al13n321)).
* CI: Single point of setting mergeable check status. [#65658](https://github.com/ClickHouse/ClickHouse/pull/65658) ([Max K.](https://github.com/maxknv)).
* Miscellaneous and insignificant changes around Client/ClientBase. [#65669](https://github.com/ClickHouse/ClickHouse/pull/65669) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add Replicated database names to ZooKeeper for introspection. [#65675](https://github.com/ClickHouse/ClickHouse/pull/65675) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Introduce type contract checks in `IColumn`. [#65687](https://github.com/ClickHouse/ClickHouse/pull/65687) ([Nikita Taranov](https://github.com/nickitat)).
* Print slightly more information in 02982_aggregation_states_destruction. [#65688](https://github.com/ClickHouse/ClickHouse/pull/65688) ([Michael Kolupaev](https://github.com/al13n321)).
* Disable stacktrace collection in GWPAsan by default. [#65701](https://github.com/ClickHouse/ClickHouse/pull/65701) ([Antonio Andelic](https://github.com/antonio2368)).
* Build jemalloc with profiler. [#65702](https://github.com/ClickHouse/ClickHouse/pull/65702) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix duplicate symbol linkage error. [#65705](https://github.com/ClickHouse/ClickHouse/pull/65705) ([Nikita Taranov](https://github.com/nickitat)).
* Fix server restarts in performance tests. [#65717](https://github.com/ClickHouse/ClickHouse/pull/65717) ([Antonio Andelic](https://github.com/antonio2368)).
* Update 03002_part_log_rmt_fetch_mutate_error.sql. [#65720](https://github.com/ClickHouse/ClickHouse/pull/65720) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix flaky `02265_column_ttl`. Closes [#65719](https://github.com/ClickHouse/ClickHouse/issues/65719). [#65742](https://github.com/ClickHouse/ClickHouse/pull/65742) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* See [#65745](https://github.com/ClickHouse/ClickHouse/issues/65745). It doesn't solve the issue, but helps a bit. [#65746](https://github.com/ClickHouse/ClickHouse/pull/65746) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update CHANGELOG.md. [#65752](https://github.com/ClickHouse/ClickHouse/pull/65752) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* document declarative ssh-keys authentication. [#65756](https://github.com/ClickHouse/ClickHouse/pull/65756) ([Tobias Florek](https://github.com/ibotty)).
* `base64En/Decode64Url` --> `base64En/Decode64URL`. [#65760](https://github.com/ClickHouse/ClickHouse/pull/65760) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix for issue [#65757](https://github.com/ClickHouse/ClickHouse/issues/65757). [#65763](https://github.com/ClickHouse/ClickHouse/pull/65763) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix flaky `test_replicated_database::test_alter_attach`. [#65766](https://github.com/ClickHouse/ClickHouse/pull/65766) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix: progress bar for read in order queries. [#65769](https://github.com/ClickHouse/ClickHouse/pull/65769) ([Igor Nikonov](https://github.com/devcrafter)).
* CI: Fix for Builds report job in backports and releases. [#65774](https://github.com/ClickHouse/ClickHouse/pull/65774) ([Max K.](https://github.com/maxknv)).
* CI: New create release workflow. [#65775](https://github.com/ClickHouse/ClickHouse/pull/65775) ([Max K.](https://github.com/maxknv)).
* fixed misspelled word. [#65778](https://github.com/ClickHouse/ClickHouse/pull/65778) ([Linh Giang](https://github.com/linhgiang24)).
* Refactor statistics interface. [#65792](https://github.com/ClickHouse/ClickHouse/pull/65792) ([Robert Schulze](https://github.com/rschu1ze)).
* Try to make `test_ldap_external_user_directory` less flaky. [#65794](https://github.com/ClickHouse/ClickHouse/pull/65794) ([Andrey Zvonov](https://github.com/zvonand)).
* AMI image with gh and jwt. [#65795](https://github.com/ClickHouse/ClickHouse/pull/65795) ([Max K.](https://github.com/maxknv)).
* Forbid join algorithm randomisation for 03094_one_thousand_joins. [#65798](https://github.com/ClickHouse/ClickHouse/pull/65798) ([Nikita Taranov](https://github.com/nickitat)).
* Fix 02931_rewrite_sum_column_and_constant flakiness. [#65800](https://github.com/ClickHouse/ClickHouse/pull/65800) ([Michael Kolupaev](https://github.com/al13n321)).
* Update StorageMaterializedView.cpp. [#65801](https://github.com/ClickHouse/ClickHouse/pull/65801) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix slow `getFQDNOrHostNameImpl` on macOS. [#65803](https://github.com/ClickHouse/ClickHouse/pull/65803) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* No jemalloc profiler for non-Linux. [#65834](https://github.com/ClickHouse/ClickHouse/pull/65834) ([Antonio Andelic](https://github.com/antonio2368)).
* Add missing workload identity changes. [#65848](https://github.com/ClickHouse/ClickHouse/pull/65848) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix rocksdb. [#65858](https://github.com/ClickHouse/ClickHouse/pull/65858) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update the list of easy tasks. [#65865](https://github.com/ClickHouse/ClickHouse/pull/65865) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update CHANGELOG.md. [#65866](https://github.com/ClickHouse/ClickHouse/pull/65866) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* This closes [#43003](https://github.com/ClickHouse/ClickHouse/issues/43003). [#65870](https://github.com/ClickHouse/ClickHouse/pull/65870) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Uninteresting changes. [#65871](https://github.com/ClickHouse/ClickHouse/pull/65871) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Max sessions for user tests improvements. [#65888](https://github.com/ClickHouse/ClickHouse/pull/65888) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Update version_date.tsv and changelogs after v24.6.1.4423-stable. [#65909](https://github.com/ClickHouse/ClickHouse/pull/65909) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Remove standalone Keeper build. [#65910](https://github.com/ClickHouse/ClickHouse/pull/65910) ([Antonio Andelic](https://github.com/antonio2368)).
* Add extra profiling helpers for Keeper. [#65918](https://github.com/ClickHouse/ClickHouse/pull/65918) ([Antonio Andelic](https://github.com/antonio2368)).
* PostgreSQL source cancel query comments. [#65919](https://github.com/ClickHouse/ClickHouse/pull/65919) ([Maksim Kita](https://github.com/kitaisreal)).
* Remove mysqlxx::Pool::Entry assignment operator. [#65920](https://github.com/ClickHouse/ClickHouse/pull/65920) ([Azat Khuzhin](https://github.com/azat)).
* No random settings for a test with `Object(JSON)`. [#65921](https://github.com/ClickHouse/ClickHouse/pull/65921) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Follow up to [#65046](https://github.com/ClickHouse/ClickHouse/issues/65046). [#65928](https://github.com/ClickHouse/ClickHouse/pull/65928) ([Kseniia Sumarokova](https://github.com/kssenii)).
* add restriction for storage join. [#65936](https://github.com/ClickHouse/ClickHouse/pull/65936) ([Han Fei](https://github.com/hanfei1991)).
* Update version_date.tsv and changelogs after v24.5.4.49-stable. [#65937](https://github.com/ClickHouse/ClickHouse/pull/65937) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Add table name to MergeTreeSource spans. [#65940](https://github.com/ClickHouse/ClickHouse/pull/65940) ([Nikita Taranov](https://github.com/nickitat)).
* Fix SettingsChangesHistory 24.7. [#65945](https://github.com/ClickHouse/ClickHouse/pull/65945) ([Raúl Marín](https://github.com/Algunenano)).
* Fix logical error "Expected ReadBufferFromFile, but got DB::EmptyReadBuffer". [#65949](https://github.com/ClickHouse/ClickHouse/pull/65949) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Use -Og instead of -O0 for debug builds. [#65953](https://github.com/ClickHouse/ClickHouse/pull/65953) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix data race for Keeper snapshot queue. [#65970](https://github.com/ClickHouse/ClickHouse/pull/65970) ([Antonio Andelic](https://github.com/antonio2368)).
* Minor changes in CHANGELOG. [#65971](https://github.com/ClickHouse/ClickHouse/pull/65971) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove unnatural punctuation from Parquet. [#65972](https://github.com/ClickHouse/ClickHouse/pull/65972) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Try fix "Check timeout expired" without any server logs in report in stateless tests. [#65977](https://github.com/ClickHouse/ClickHouse/pull/65977) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix support of non-const scale arguments in rounding functions. [#65983](https://github.com/ClickHouse/ClickHouse/pull/65983) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
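  A quick check of the fixed behavior (a non-constant scale that varies per row):

  ```sql
  SELECT number AS scale, round(1234.5678, number)
  FROM system.numbers
  LIMIT 4;
  ```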
* More aesthetic error messages. [#65985](https://github.com/ClickHouse/ClickHouse/pull/65985) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix race in s3queue. [#65986](https://github.com/ClickHouse/ClickHouse/pull/65986) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Now it's possible to specify `s3-storage`, `azure-object-storage` and in general `object-storage`. [#65988](https://github.com/ClickHouse/ClickHouse/pull/65988) ([alesapin](https://github.com/alesapin)).
* Fix flaky test_storage_s3_queue tests. [#66009](https://github.com/ClickHouse/ClickHouse/pull/66009) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Relax the check in 02982_aggregation_states_destruction. [#66011](https://github.com/ClickHouse/ClickHouse/pull/66011) ([Nikita Taranov](https://github.com/nickitat)).
* Fix `01158_zookeeper_log_long`. [#66012](https://github.com/ClickHouse/ClickHouse/pull/66012) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove scary jemalloc log. [#66028](https://github.com/ClickHouse/ClickHouse/pull/66028) ([Antonio Andelic](https://github.com/antonio2368)).
* Move experimental settings to the experimental block. [#66030](https://github.com/ClickHouse/ClickHouse/pull/66030) ([Raúl Marín](https://github.com/Algunenano)).
* Fix lock-order-inversion in DatabaseCatalog. [#66038](https://github.com/ClickHouse/ClickHouse/pull/66038) ([Nikolay Degterinsky](https://github.com/evillique)).
* Try disabling jemalloc background threads. [#66041](https://github.com/ClickHouse/ClickHouse/pull/66041) ([Antonio Andelic](https://github.com/antonio2368)).
* Try to avoid conflicts in `SettingsChangesHistory.cpp`. [#66042](https://github.com/ClickHouse/ClickHouse/pull/66042) ([Anton Popov](https://github.com/CurtizJ)).
* Add profile events for regex cache. [#66050](https://github.com/ClickHouse/ClickHouse/pull/66050) ([Antonio Andelic](https://github.com/antonio2368)).
* Bump vectorscan to 5.4.10.1. [#66056](https://github.com/ClickHouse/ClickHouse/pull/66056) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove obsolete comment. [#66059](https://github.com/ClickHouse/ClickHouse/pull/66059) ([Robert Schulze](https://github.com/rschu1ze)).
* Maybe fix tsan assert in `test_mysql_killed_while_insert_8_0`. [#66064](https://github.com/ClickHouse/ClickHouse/pull/66064) ([Robert Schulze](https://github.com/rschu1ze)).
* Move some of `HTTPHandler` stuff to separate source files in order to reuse it in `PrometheusRequestHandler`. This PR is required for https://github.com/ClickHouse/ClickHouse/pull/64183. [#66067](https://github.com/ClickHouse/ClickHouse/pull/66067) ([Vitaly Baranov](https://github.com/vitlibar)).
* Bump rocksdb to v6.23.3. [#66068](https://github.com/ClickHouse/ClickHouse/pull/66068) ([Robert Schulze](https://github.com/rschu1ze)).
* Add protobufs for `Prometheus` `remote-write` / `remote-read` protocols to our repository. Fix cmake script for compiling protobufs. [#66069](https://github.com/ClickHouse/ClickHouse/pull/66069) ([Vitaly Baranov](https://github.com/vitlibar)).
* Use pinned versions of all python packages in CI docker images. Also makes clang-18.1.8 work with sanitizers and surprisingly fixes [#66049](https://github.com/ClickHouse/ClickHouse/issues/66049). [#66070](https://github.com/ClickHouse/ClickHouse/pull/66070) ([alesapin](https://github.com/alesapin)).
* Clean-up custom LLVM 15 patches. [#66072](https://github.com/ClickHouse/ClickHouse/pull/66072) ([Robert Schulze](https://github.com/rschu1ze)).
* Minor JWT client fixes. [#66073](https://github.com/ClickHouse/ClickHouse/pull/66073) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Bump vectorscan to 5.4.11. [#66082](https://github.com/ClickHouse/ClickHouse/pull/66082) ([Robert Schulze](https://github.com/rschu1ze)).
* Print stacktrace in case of abort after logical error. [#66091](https://github.com/ClickHouse/ClickHouse/pull/66091) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* fix flaky 03172_error_log_table_not_empty. [#66093](https://github.com/ClickHouse/ClickHouse/pull/66093) ([Sema Checherinda](https://github.com/CheSema)).
* Bump s2geometry to latest master. [#66094](https://github.com/ClickHouse/ClickHouse/pull/66094) ([Robert Schulze](https://github.com/rschu1ze)).
* update keeper bench example config file. [#66095](https://github.com/ClickHouse/ClickHouse/pull/66095) ([Han Fei](https://github.com/hanfei1991)).
* Avoid using source directory for generated files. [#66097](https://github.com/ClickHouse/ClickHouse/pull/66097) ([Azat Khuzhin](https://github.com/azat)).
* More precise warning message about sanitizers. [#66098](https://github.com/ClickHouse/ClickHouse/pull/66098) ([Anton Popov](https://github.com/CurtizJ)).
* Slightly better calculation of primary index. [#66099](https://github.com/ClickHouse/ClickHouse/pull/66099) ([Anton Popov](https://github.com/CurtizJ)).
* Bump Azure to 1.12. [#66100](https://github.com/ClickHouse/ClickHouse/pull/66100) ([Robert Schulze](https://github.com/rschu1ze)).
* Add a test for [#58998](https://github.com/ClickHouse/ClickHouse/issues/58998). [#66101](https://github.com/ClickHouse/ClickHouse/pull/66101) ([Anton Popov](https://github.com/CurtizJ)).
* CI: Fix sync pr merge. [#66105](https://github.com/ClickHouse/ClickHouse/pull/66105) ([Max K.](https://github.com/maxknv)).
* Remove flaky case from 02956_rocksdb_bulk_sink. [#66107](https://github.com/ClickHouse/ClickHouse/pull/66107) ([vdimir](https://github.com/vdimir)).
* Fix bugfix checker. [#66120](https://github.com/ClickHouse/ClickHouse/pull/66120) ([Raúl Marín](https://github.com/Algunenano)).
* Correctly print long processing requests in Keeper. [#66124](https://github.com/ClickHouse/ClickHouse/pull/66124) ([Antonio Andelic](https://github.com/antonio2368)).
* Update version_date.tsv and changelogs after v24.6.2.17-stable. [#66127](https://github.com/ClickHouse/ClickHouse/pull/66127) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Bump s2geometry again. [#66136](https://github.com/ClickHouse/ClickHouse/pull/66136) ([Robert Schulze](https://github.com/rschu1ze)).
* Switch submodule `contrib/orc` to a proper commit in the [main](https://github.com/ClickHouse/orc/tree/main) branch. Previously a commit from a removed branch was used ([see](https://github.com/ClickHouse/orc/pull/13)). [#66137](https://github.com/ClickHouse/ClickHouse/pull/66137) ([Vitaly Baranov](https://github.com/vitlibar)).
* Finalize MergedBlockOutputStream in dtor. [#66138](https://github.com/ClickHouse/ClickHouse/pull/66138) ([Nikita Taranov](https://github.com/nickitat)).
* Proper destruction order of AsyncLoader::Pool fields. [#66145](https://github.com/ClickHouse/ClickHouse/pull/66145) ([Sergei Trifonov](https://github.com/serxa)).
* Playing minesweeper with build system. [#66147](https://github.com/ClickHouse/ClickHouse/pull/66147) ([Nikita Taranov](https://github.com/nickitat)).
* Fix clang-tidy error in BufferWithOwnMemory.h. [#66161](https://github.com/ClickHouse/ClickHouse/pull/66161) ([Nikita Taranov](https://github.com/nickitat)).
* Use peak_threads_usage instead of arrayUniq(thread_ids) in tests. [#66162](https://github.com/ClickHouse/ClickHouse/pull/66162) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash when adding empty tuple to query cache. [#66168](https://github.com/ClickHouse/ClickHouse/pull/66168) ([Michael Kolupaev](https://github.com/al13n321)).
* tests: fix 01563_distributed_query_finish flakiness (due to system.*_log_sender). [#66171](https://github.com/ClickHouse/ClickHouse/pull/66171) ([Azat Khuzhin](https://github.com/azat)).
* Refactor `OptimizeIfWithConstantConditionVisitor` using `InDepthNodeVisitor`. [#66184](https://github.com/ClickHouse/ClickHouse/pull/66184) ([zhongyuankai](https://github.com/zhongyuankai)).
* Update README.md. [#66186](https://github.com/ClickHouse/ClickHouse/pull/66186) ([Tyler Hannan](https://github.com/tylerhannan)).
* Fix 01246_buffer_flush flakiness. [#66188](https://github.com/ClickHouse/ClickHouse/pull/66188) ([Azat Khuzhin](https://github.com/azat)).
* Avoid using harmful function `rand()` in grpc. [#66191](https://github.com/ClickHouse/ClickHouse/pull/66191) ([Vitaly Baranov](https://github.com/vitlibar)).
* Bump RocksDB. [#66216](https://github.com/ClickHouse/ClickHouse/pull/66216) ([Robert Schulze](https://github.com/rschu1ze)).
* Update README.md. [#66217](https://github.com/ClickHouse/ClickHouse/pull/66217) ([Tyler Hannan](https://github.com/tylerhannan)).
* Fixes peak_threads_usage metric when materialised views are involved. [#66230](https://github.com/ClickHouse/ClickHouse/pull/66230) ([Sema Checherinda](https://github.com/CheSema)).
* Remove test as requested in https://github.com/ClickHouse/ClickHouse/pull/65277#issuecomment-2211361465. [#66233](https://github.com/ClickHouse/ClickHouse/pull/66233) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix test `00504_mergetree_arrays_rw.sql`. [#66248](https://github.com/ClickHouse/ClickHouse/pull/66248) ([Anton Popov](https://github.com/CurtizJ)).
* CI: Do not finalize CI running status unless all success. [#66276](https://github.com/ClickHouse/ClickHouse/pull/66276) ([Max K.](https://github.com/maxknv)).
* Collect core dumps in more tests. [#66281](https://github.com/ClickHouse/ClickHouse/pull/66281) ([Antonio Andelic](https://github.com/antonio2368)).
* Add a stateless test for gRPC protocol. [#66284](https://github.com/ClickHouse/ClickHouse/pull/66284) ([Vitaly Baranov](https://github.com/vitlibar)).
* Log message: Failed to connect to replica ... [#66289](https://github.com/ClickHouse/ClickHouse/pull/66289) ([Igor Nikonov](https://github.com/devcrafter)).
* Update run.sh. [#66290](https://github.com/ClickHouse/ClickHouse/pull/66290) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Some changes in the codebase as a preparation for LLVM 18. [#66293](https://github.com/ClickHouse/ClickHouse/pull/66293) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* There's some problem with iptables in parallel tests. [#66304](https://github.com/ClickHouse/ClickHouse/pull/66304) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Fixed a build failure if ENABLE_AWS_S3 is OFF. [#66335](https://github.com/ClickHouse/ClickHouse/pull/66335) ([Ilya Golshtein](https://github.com/ilejn)).
* Enable checks in assert_cast under sanitizers. [#66336](https://github.com/ClickHouse/ClickHouse/pull/66336) ([Nikita Taranov](https://github.com/nickitat)).
* Create release workflow. [#66339](https://github.com/ClickHouse/ClickHouse/pull/66339) ([Max K.](https://github.com/maxknv)).
* Fix invalid XML. [#66342](https://github.com/ClickHouse/ClickHouse/pull/66342) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix possible deadlock for jemalloc with enabled profiler. [#66346](https://github.com/ClickHouse/ClickHouse/pull/66346) ([Azat Khuzhin](https://github.com/azat)).
* Fix test_parallel_replicas_custom_key. [#66349](https://github.com/ClickHouse/ClickHouse/pull/66349) ([Antonio Andelic](https://github.com/antonio2368)).
* Collect logs from `minio` in stateless and stateful tests. [#66353](https://github.com/ClickHouse/ClickHouse/pull/66353) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix "Sending a batch of X files to Y (0.00 rows, 0.00 B bytes)." in case of batch restoring. [#66375](https://github.com/ClickHouse/ClickHouse/pull/66375) ([Azat Khuzhin](https://github.com/azat)).
* Fix 03030_system_flush_distributed_settings flakiness. [#66376](https://github.com/ClickHouse/ClickHouse/pull/66376) ([Azat Khuzhin](https://github.com/azat)).
* PR cleanup: remove redundant code. [#66380](https://github.com/ClickHouse/ClickHouse/pull/66380) ([Igor Nikonov](https://github.com/devcrafter)).
* New Slack bot to post messages about CI events: post a message if OOM occurs. [#66392](https://github.com/ClickHouse/ClickHouse/pull/66392) ([Max K.](https://github.com/maxknv)).
* Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Update test_storage_rabbitmq/test.py. [#66396](https://github.com/ClickHouse/ClickHouse/pull/66396) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add query elapsed time for non-default format in play UI. [#66398](https://github.com/ClickHouse/ClickHouse/pull/66398) ([Azat Khuzhin](https://github.com/azat)).
* Untangle setting headers. [#66404](https://github.com/ClickHouse/ClickHouse/pull/66404) ([Raúl Marín](https://github.com/Algunenano)).
* Remove noisy message. [#66406](https://github.com/ClickHouse/ClickHouse/pull/66406) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* If a job is detected as in-progress in a PR workflow run, just remove it from the todo list, as it is not affected by the change. [#66407](https://github.com/ClickHouse/ClickHouse/pull/66407) ([Max K.](https://github.com/maxknv)).
* CI: CIBuddy to post from master only. [#66417](https://github.com/ClickHouse/ClickHouse/pull/66417) ([Max K.](https://github.com/maxknv)).
* Add a test for [#66333](https://github.com/ClickHouse/ClickHouse/issues/66333). [#66432](https://github.com/ClickHouse/ClickHouse/pull/66432) ([max-vostrikov](https://github.com/max-vostrikov)).
* Limit number of linker jobs on arm to avoid OOM during build. [#66435](https://github.com/ClickHouse/ClickHouse/pull/66435) ([Nikita Taranov](https://github.com/nickitat)).
* [RFC] Fix jemalloc assertion due to non-monotonic CLOCK_MONOTONIC_COARSE. [#66439](https://github.com/ClickHouse/ClickHouse/pull/66439) ([Azat Khuzhin](https://github.com/azat)).
* CI: Do not block CI on a small number of test failures. [#66440](https://github.com/ClickHouse/ClickHouse/pull/66440) ([Max K.](https://github.com/maxknv)).
* Stateless tests: fix flaky tests 01037_polygon_dicts*. [#66445](https://github.com/ClickHouse/ClickHouse/pull/66445) ([Nikita Fomichev](https://github.com/fm4v)).
* Related to https://github.com/ClickHouse/ClickHouse/pull/62067: in a [stateless test run](https://s3.amazonaws.com/clickhouse-test-reports/66410/5557dce188cabc7477bb4e874d47e3b80278ee66/stateless_tests__release_.html), queries for `alter_table` (such as `OPTIMIZE TABLE alter_table0 FINAL`) did not finish automatically after 250+ seconds. [#66460](https://github.com/ClickHouse/ClickHouse/pull/66460) ([Alexander Tokmakov](https://github.com/tavplubix)).
* An OOM error was not visible since the process is killed and no status is set. This change sets the ERROR status if the job was killed. [#66463](https://github.com/ClickHouse/ClickHouse/pull/66463) ([Max K.](https://github.com/maxknv)).
* Add AST fuzzers jobs for CI caching so that they can be skipped in PRs not related to build or tests. [#66468](https://github.com/ClickHouse/ClickHouse/pull/66468) ([Max K.](https://github.com/maxknv)).
* If job with the same digest has been seen in master's CI it should be skipped in PR run. [#66471](https://github.com/ClickHouse/ClickHouse/pull/66471) ([Max K.](https://github.com/maxknv)).
* CI: Check job's exit status and report if killed. [#66477](https://github.com/ClickHouse/ClickHouse/pull/66477) ([Max K.](https://github.com/maxknv)).
* This closes [#37557](https://github.com/ClickHouse/ClickHouse/issues/37557). [#66482](https://github.com/ClickHouse/ClickHouse/pull/66482) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* CI: Add retry for GH set_status_comment call. [#66488](https://github.com/ClickHouse/ClickHouse/pull/66488) ([Max K.](https://github.com/maxknv)).
* OpenSSL: Minor follow-up to [#66064](https://github.com/ClickHouse/ClickHouse/issues/66064). [#66489](https://github.com/ClickHouse/ClickHouse/pull/66489) ([Robert Schulze](https://github.com/rschu1ze)).
* CI: Fix for job filtering in PRs. [#66490](https://github.com/ClickHouse/ClickHouse/pull/66490) ([Max K.](https://github.com/maxknv)).
* CI: Create release workflow updates. [#66498](https://github.com/ClickHouse/ClickHouse/pull/66498) ([Max K.](https://github.com/maxknv)).
* Add one more revision to ignore. [#66499](https://github.com/ClickHouse/ClickHouse/pull/66499) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Unit tests vomit a ton of garbage, see https://s3.amazonaws.com/clickhouse-test-reports/66457/0c82dc91f07b29ba503d7579c7d3ebecba532b73/unit_tests__tsan_/run.log - remove it. [#66501](https://github.com/ClickHouse/ClickHouse/pull/66501) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix double whitespace in EXPLAIN AST CREATE. [#66505](https://github.com/ClickHouse/ClickHouse/pull/66505) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad test `02530_dictionaries_update_field`. [#66507](https://github.com/ClickHouse/ClickHouse/pull/66507) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Https://play.clickhouse.com/play?user=play#u0vmrunuignozwnrx3n0yxj0x3rpbwusignozwnrx25hbwusihrlc3rfbmftzswgcmvwb3j0x3vybapguk9nignozwnrcwpxsevsrsbjagvja19zdgfydf90aw1lid49ig5vdygpic0gsu5urvjwquwgmjqwiehpvvikicagieforcbwdwxsx3jlcxvlc3rfbnvtymvyid0gmaogicagqu5eihrlc3rfc3rhdhvzice9icdts0lquevejwogicagqu5eihrlc3rfc3rhdhvziexjs0ugj0yljwogicagqu5eignozwnrx3n0yxr1cyahpsanc3vjy2vzcyckicagieforcbwb3npdglvbih0zxn0x25hbwusicdhcgfjagvfyxjyb3cnksa+idakt1jervigqlkgy2hly2tfc3rhcnrfdgltzq==. [#66508](https://github.com/ClickHouse/ClickHouse/pull/66508) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix MSan report in GRPC. [#66509](https://github.com/ClickHouse/ClickHouse/pull/66509) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* CI: Fix for skipping Builds_2 in PRs' CI. [#66512](https://github.com/ClickHouse/ClickHouse/pull/66512) ([Max K.](https://github.com/maxknv)).
* CI: Do not block Tests_3 unless MAX_FAILED_TESTS exceeded. [#66513](https://github.com/ClickHouse/ClickHouse/pull/66513) ([Max K.](https://github.com/maxknv)).
* Fix `02918_parallel_replicas_custom_key_unavailable_replica`. [#66516](https://github.com/ClickHouse/ClickHouse/pull/66516) ([Antonio Andelic](https://github.com/antonio2368)).
* Stateless tests: improvements related to OOM of test runs. [#66520](https://github.com/ClickHouse/ClickHouse/pull/66520) ([Nikita Fomichev](https://github.com/fm4v)).
* Tests: rename bad log names. [#66522](https://github.com/ClickHouse/ClickHouse/pull/66522) ([Nikita Fomichev](https://github.com/fm4v)).
|
||||||
|
* Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* CI: Multiple fixes for handling killed jobs. [#66524](https://github.com/ClickHouse/ClickHouse/pull/66524) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Allow GWP Asan allocations only when initialization is finished. [#66526](https://github.com/ClickHouse/ClickHouse/pull/66526) ([Alexey Katsman](https://github.com/alexkats)).
|
||||||
|
* Update 02443_detach_attach_partition.sh. [#66529](https://github.com/ClickHouse/ClickHouse/pull/66529) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Follow up [#66435](https://github.com/ClickHouse/ClickHouse/issues/66435). [#66530](https://github.com/ClickHouse/ClickHouse/pull/66530) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* fix log in keeper tcp handler. [#66531](https://github.com/ClickHouse/ClickHouse/pull/66531) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
|
* CI: Report job start and finish to CI DB. [#66533](https://github.com/ClickHouse/ClickHouse/pull/66533) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Update 01396_inactive_replica_cleanup_nodes_zookeeper.sh. [#66535](https://github.com/ClickHouse/ClickHouse/pull/66535) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Add dedicated runner to libfuzzer, update docker. [#66551](https://github.com/ClickHouse/ClickHouse/pull/66551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* fix tidy build. [#66552](https://github.com/ClickHouse/ClickHouse/pull/66552) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* No-op env change. [#66553](https://github.com/ClickHouse/ClickHouse/pull/66553) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix typo in new_delete.cpp. [#66554](https://github.com/ClickHouse/ClickHouse/pull/66554) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix something in Fast Test. [#66558](https://github.com/ClickHouse/ClickHouse/pull/66558) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* update trusted contributors. [#66561](https://github.com/ClickHouse/ClickHouse/pull/66561) ([Xu Jia](https://github.com/XuJia0210)).
|
||||||
|
* Delete bad test `02805_distributed_queries_timeouts`. [#66563](https://github.com/ClickHouse/ClickHouse/pull/66563) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* More clarity in the test `03001_consider_lwd_when_merge`. [#66564](https://github.com/ClickHouse/ClickHouse/pull/66564) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Stateless tests: temporary disable sequential tests in parallel. [#66585](https://github.com/ClickHouse/ClickHouse/pull/66585) ([Nikita Fomichev](https://github.com/fm4v)).
|
||||||
|
* Move view targets to separate AST class `ASTViewTargets` in order to allow extending it to support more kinds of view targets. [#66590](https://github.com/ClickHouse/ClickHouse/pull/66590) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Fix bsdtar for @nikitamikhaylov. [#66592](https://github.com/ClickHouse/ClickHouse/pull/66592) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* CI: POC for Auto Releases. [#66593](https://github.com/ClickHouse/ClickHouse/pull/66593) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Fix clang tidy after [#66402](https://github.com/ClickHouse/ClickHouse/issues/66402). [#66597](https://github.com/ClickHouse/ClickHouse/pull/66597) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Adjust the runtime of some slow performance test. [#66619](https://github.com/ClickHouse/ClickHouse/pull/66619) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* CI: Scale down AutoScaling Groups from runners. [#66622](https://github.com/ClickHouse/ClickHouse/pull/66622) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Allow to run clang-tidy with clang-19. [#66625](https://github.com/ClickHouse/ClickHouse/pull/66625) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix terrible test @arthurpassos. [#66632](https://github.com/ClickHouse/ClickHouse/pull/66632) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix bad log message. [#66633](https://github.com/ClickHouse/ClickHouse/pull/66633) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Negative sign in prewhere optimization. [#66637](https://github.com/ClickHouse/ClickHouse/pull/66637) ([cangyin](https://github.com/cangyin)).
|
||||||
|
* Closes [#66639](https://github.com/ClickHouse/ClickHouse/issues/66639#event-13533944949). [#66640](https://github.com/ClickHouse/ClickHouse/pull/66640) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Avoid generating named tuple for special keywords (null, true, false). [#66641](https://github.com/ClickHouse/ClickHouse/pull/66641) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* rearrange heavy tests 03008_deduplication. [#66642](https://github.com/ClickHouse/ClickHouse/pull/66642) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
|
||||||
|
* CI: Remove aws lambda packages from oss. [#66651](https://github.com/ClickHouse/ClickHouse/pull/66651) ([Max K.](https://github.com/maxknv)).
|
||||||
|
* Introduce separate DEBUG_OR_SANITIZER_BUILD macro. [#66652](https://github.com/ClickHouse/ClickHouse/pull/66652) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Increase backoff because with slow builds sometimes 100ms is not enough to recover. [#66653](https://github.com/ClickHouse/ClickHouse/pull/66653) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix wrong queries hung error because of 02044_url_glob_parallel_connection_refused. [#66657](https://github.com/ClickHouse/ClickHouse/pull/66657) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* add log for splitBlockIntoParts. [#66658](https://github.com/ClickHouse/ClickHouse/pull/66658) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
|
* Minor: Make `CaseSensitiveness` an enum class. [#66673](https://github.com/ClickHouse/ClickHouse/pull/66673) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix bad test `02210_processors_profile_log`. [#66684](https://github.com/ClickHouse/ClickHouse/pull/66684) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix something around clang-tidy. [#66694](https://github.com/ClickHouse/ClickHouse/pull/66694) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* ci: dump dmesg in case of OOM. [#66705](https://github.com/ClickHouse/ClickHouse/pull/66705) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* fix clang tidy. [#66706](https://github.com/ClickHouse/ClickHouse/pull/66706) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
|
* Https://s3.amazonaws.com/clickhouse-test-reports/61109/5cf2b53f146c1a4f24d8212f9f810d587c46bfc0/stateless_tests__release_.html. [#66724](https://github.com/ClickHouse/ClickHouse/pull/66724) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* CI: Fix issue with a skipped Build report. [#66726](https://github.com/ClickHouse/ClickHouse/pull/66726) ([Max K.](https://github.com/maxknv)).
* Relax a condition in a test, remove unused counters. [#66730](https://github.com/ClickHouse/ClickHouse/pull/66730) ([Sema Checherinda](https://github.com/CheSema)).
* Remove bad test `host_resolver_fail_count`. [#66731](https://github.com/ClickHouse/ClickHouse/pull/66731) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad test `03036_join_filter_push_down_equivalent_sets`. [#66736](https://github.com/ClickHouse/ClickHouse/pull/66736) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad tests `long_select_and_alter`. [#66737](https://github.com/ClickHouse/ClickHouse/pull/66737) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add test `test_storage_mysql/test.py::test_joins`. [#66743](https://github.com/ClickHouse/ClickHouse/pull/66743) ([vdimir](https://github.com/vdimir)).
* Disallow build exclusion only by CI settings (ci_include_, ci_exclude_) to avoid running builds in auto-sync PRs. [#66744](https://github.com/ClickHouse/ClickHouse/pull/66744) ([Max K.](https://github.com/maxknv)).
* Use a non-existent address to check the connection error at table creation. [#66760](https://github.com/ClickHouse/ClickHouse/pull/66760) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67063](https://github.com/ClickHouse/ClickHouse/issues/67063): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Fix flakiness of async insert tests due to adaptive timeout. [#66771](https://github.com/ClickHouse/ClickHouse/pull/66771) ([Raúl Marín](https://github.com/Algunenano)).
* Attempt to fix flakiness of 01194_http_query_id. [#66774](https://github.com/ClickHouse/ClickHouse/pull/66774) ([Raúl Marín](https://github.com/Algunenano)).
* Turn off randomization of a harmful setting. [#66776](https://github.com/ClickHouse/ClickHouse/pull/66776) ([alesapin](https://github.com/alesapin)).
* The number of batches was reduced in https://github.com/ClickHouse/ClickHouse/pull/65186, but then the parallel execution was disabled in https://github.com/ClickHouse/ClickHouse/pull/66585. So now tasks fail with timeout sometimes: https://s3.amazonaws.com/clickhouse-test-reports/66724/36275fdacc34206931f69087fe77539e25bbbedd/stateless_tests__tsan__s3_storage__[2_3].html. [#66783](https://github.com/ClickHouse/ClickHouse/pull/66783) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove obsolete code from CMakeLists. [#66786](https://github.com/ClickHouse/ClickHouse/pull/66786) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Stateless tests: split parallel tests more evenly. [#66787](https://github.com/ClickHouse/ClickHouse/pull/66787) ([Nikita Fomichev](https://github.com/fm4v)).
* Fix test `02724_limit_num_mutations.sh`. [#66788](https://github.com/ClickHouse/ClickHouse/pull/66788) ([Anton Popov](https://github.com/CurtizJ)).
* Better diagnostics in `test_disk_configuration`. [#66802](https://github.com/ClickHouse/ClickHouse/pull/66802) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad test `02950_part_log_bytes_uncompressed`. [#66803](https://github.com/ClickHouse/ClickHouse/pull/66803) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better diagnostics for test trace_events_stress. [#66804](https://github.com/ClickHouse/ClickHouse/pull/66804) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make test `00997_set_index_array` lighter. [#66817](https://github.com/ClickHouse/ClickHouse/pull/66817) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Miscellaneous. [#66818](https://github.com/ClickHouse/ClickHouse/pull/66818) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix inconsistent formatting of lambda functions inside composite types. [#66819](https://github.com/ClickHouse/ClickHouse/pull/66819) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Never await BuildReport in CI, just redo it (awaiting can take longer); remove BuildReport if there are no build jobs in the workflow (for instance, a docs-only change); do not fail the CheckReadyForMerge job if the only non-green status is Cloud Sync. [#66822](https://github.com/ClickHouse/ClickHouse/pull/66822) ([Max K.](https://github.com/maxknv)).
* Remove bad tests @azat. [#66823](https://github.com/ClickHouse/ClickHouse/pull/66823) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* CI: New Release workflow updates and fixes. [#66830](https://github.com/ClickHouse/ClickHouse/pull/66830) ([Max K.](https://github.com/maxknv)).
* Fix signed integer overflow in function `age`. [#66831](https://github.com/ClickHouse/ClickHouse/pull/66831) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix RocksDB bs. [#66838](https://github.com/ClickHouse/ClickHouse/pull/66838) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Split a test for index. [#66839](https://github.com/ClickHouse/ClickHouse/pull/66839) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix inconsistent formatting of `NOT ((SELECT ...))`. [#66840](https://github.com/ClickHouse/ClickHouse/pull/66840) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make test `01592_long_window_functions1` lighter. [#66841](https://github.com/ClickHouse/ClickHouse/pull/66841) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* What if I change the test for SSL authentication? [#66844](https://github.com/ClickHouse/ClickHouse/pull/66844) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Functions `[a-g]*`: Iterate over `input_rows_count` where appropriate. [#66846](https://github.com/ClickHouse/ClickHouse/pull/66846) ([Robert Schulze](https://github.com/rschu1ze)).
* Revert libunwind patch. [#66850](https://github.com/ClickHouse/ClickHouse/pull/66850) ([Antonio Andelic](https://github.com/antonio2368)).
* Split test 03038_nested_dynamic_merges to avoid timeouts. [#66863](https://github.com/ClickHouse/ClickHouse/pull/66863) ([Kruglov Pavel](https://github.com/Avogar)).
* CI: Print instance info in runner's init script. [#66868](https://github.com/ClickHouse/ClickHouse/pull/66868) ([Max K.](https://github.com/maxknv)).
* Backported in [#67257](https://github.com/ClickHouse/ClickHouse/issues/67257): Follow-up to [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* CI: CI Buddy to notify about fatal workflow failures. [#66890](https://github.com/ClickHouse/ClickHouse/pull/66890) ([Max K.](https://github.com/maxknv)).
* CI: Add EC2 instance lifecycle metadata to CIDB. [#66918](https://github.com/ClickHouse/ClickHouse/pull/66918) ([Max K.](https://github.com/maxknv)).
* CI: Remove CI runner scripts from OSS. [#66920](https://github.com/ClickHouse/ClickHouse/pull/66920) ([Max K.](https://github.com/maxknv)).
* Backported in [#67209](https://github.com/ClickHouse/ClickHouse/issues/67209): Decrease rate limit in `01923_network_receive_time_metric_insert`. [#66924](https://github.com/ClickHouse/ClickHouse/pull/66924) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67227](https://github.com/ClickHouse/ClickHouse/issues/67227): groupArrayIntersect: fix serialization bug. [#66928](https://github.com/ClickHouse/ClickHouse/pull/66928) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#67207](https://github.com/ClickHouse/ClickHouse/issues/67207): Un-flake test_runtime_configurable_cache_size. [#66934](https://github.com/ClickHouse/ClickHouse/pull/66934) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#66975](https://github.com/ClickHouse/ClickHouse/issues/66975): CI: Fix Docker server build for release branches. [#66955](https://github.com/ClickHouse/ClickHouse/pull/66955) ([Max K.](https://github.com/maxknv)).
* Backported in [#67213](https://github.com/ClickHouse/ClickHouse/issues/67213): [CI Fest] Split dynamic tests and rewrite them from sh to sql to avoid timeouts. [#66981](https://github.com/ClickHouse/ClickHouse/pull/66981) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67033](https://github.com/ClickHouse/ClickHouse/issues/67033): [CI Fest] Fix use-of-uninitialized-value in JSONExtract* numeric functions. [#66984](https://github.com/ClickHouse/ClickHouse/pull/66984) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67051](https://github.com/ClickHouse/ClickHouse/issues/67051): CI: Fix for workflow results parsing. [#67000](https://github.com/ClickHouse/ClickHouse/pull/67000) ([Max K.](https://github.com/maxknv)).
* Backported in [#67116](https://github.com/ClickHouse/ClickHouse/issues/67116): Disable setting `optimize_functions_to_subcolumns`. [#67046](https://github.com/ClickHouse/ClickHouse/pull/67046) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#67205](https://github.com/ClickHouse/ClickHouse/issues/67205): Increase max allocation size for sanitizers. [#67049](https://github.com/ClickHouse/ClickHouse/pull/67049) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67124](https://github.com/ClickHouse/ClickHouse/issues/67124): Very sad failure: ``` 2024.07.24 13:28:45.517777 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> executeQuery: (from 172.16.11.1:55890) OPTIMIZE TABLE replicated_mt FINAL (stage: Complete) 2024.07.24 13:28:45.525945 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Trace> default.replicated_mt (ReplicatedMergeTreeQueue): Waiting for 4 entries to be processed: queue-0000000004, queue-0000000002, queue-0000000001, queue-0000000000 2024.07.24 13:29:15.528024 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e) (MergerMutator): Selected 3 parts from all_0_0_0 to all_2_2_0 2024.07.24 13:29:15.530736 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Trace> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Created log entry /clickhouse/tables/replicated_mt/log/log-0000000004 for merge all_0_2_1 2024.07.24 13:29:15.530873 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for node1 to process log entry 2024.07.24 13:29:15.530919 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for node1 to pull log-0000000004 to queue 2024.07.24 13:29:15.534286 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Looking for node corresponding to log-0000000004 in node1 queue 2024.07.24 13:29:15.534793 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for queue-0000000005 to disappear from node1 queue 2024.07.24 13:29:15.585533 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} <Debug> TCPHandler: Processed in 30.067804125 sec. ```. [#67067](https://github.com/ClickHouse/ClickHouse/pull/67067) ([alesapin](https://github.com/alesapin)).
* Backported in [#67203](https://github.com/ClickHouse/ClickHouse/issues/67203): Fix flaky `test_seekable_formats_url` and `test_seekable_formats` S3 storage tests. [#67070](https://github.com/ClickHouse/ClickHouse/pull/67070) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67222](https://github.com/ClickHouse/ClickHouse/issues/67222): Fix flaky 2680. [#67078](https://github.com/ClickHouse/ClickHouse/pull/67078) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#67190](https://github.com/ClickHouse/ClickHouse/issues/67190): Attempt to fix flakiness of some window view tests. [#67130](https://github.com/ClickHouse/ClickHouse/pull/67130) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#67272](https://github.com/ClickHouse/ClickHouse/issues/67272): Rename (unreleased) bad setting. [#67149](https://github.com/ClickHouse/ClickHouse/pull/67149) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#67441](https://github.com/ClickHouse/ClickHouse/issues/67441): Try to fix 2572. [#67158](https://github.com/ClickHouse/ClickHouse/pull/67158) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#67416](https://github.com/ClickHouse/ClickHouse/issues/67416): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
24
docs/changelogs/v24.7.2.13-stable.md
Normal file
@@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.7.2.13-stable (6e41f601b2f) FIXME as compared to v24.7.1.2915-stable (a37d2d43da7)

#### Improvement
* Backported in [#67531](https://github.com/ClickHouse/ClickHouse/issues/67531): In PR https://github.com/ClickHouse/ClickHouse/pull/66025 we introduced a setting `input_format_orc_read_use_writer_time_zone` that makes the ORC reader use the writer's time zone instead of always using `GMT`. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)).

#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#67505](https://github.com/ClickHouse/ClickHouse/issues/67505): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67580](https://github.com/ClickHouse/ClickHouse/issues/67580): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#67551](https://github.com/ClickHouse/ClickHouse/issues/67551): [Green CI] Fix test test_storage_s3_queue/test.py::test_max_set_age. [#67035](https://github.com/ClickHouse/ClickHouse/pull/67035) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67514](https://github.com/ClickHouse/ClickHouse/issues/67514): Split test 02967_parallel_replicas_join_algo_and_analyzer. [#67211](https://github.com/ClickHouse/ClickHouse/pull/67211) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67545](https://github.com/ClickHouse/ClickHouse/issues/67545): [Green CI] Fix WriteBuffer destructor when finalize has failed for MergeTreeDeduplicationLog::shutdown. [#67474](https://github.com/ClickHouse/ClickHouse/pull/67474) ([Alexey Katsman](https://github.com/alexkats)).
@@ -54,7 +54,7 @@ CREATE TABLE keeper_map_table
    `v2` String,
    `v3` Float32
)
-ENGINE = KeeperMap(/keeper_map_table, 4)
+ENGINE = KeeperMap('/keeper_map_table', 4)
PRIMARY KEY key
```
@@ -32,6 +32,7 @@ The supported formats are:
| [Vertical](#vertical) | ✗ | ✔ |
| [JSON](#json) | ✔ | ✔ |
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
| [JSONAsObject](#jsonasobject) | ✔ | ✗ |
| [JSONStrings](#jsonstrings) | ✔ | ✔ |
| [JSONColumns](#jsoncolumns) | ✔ | ✔ |
| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock) | ✔ | ✔ |
@@ -822,6 +823,67 @@ Result:
└────────────────────────────┘
```

## JSONAsObject {#jsonasobject}

In this format, a single JSON object is interpreted as a single [Object('json')](/docs/en/sql-reference/data-types/json.md) value. If the input has several JSON objects (comma separated), they are interpreted as separate rows. If the input data is enclosed in square brackets, it is interpreted as an array of JSONs.

This format can only be parsed for a table with a single field of type [Object('json')](/docs/en/sql-reference/data-types/json.md). The remaining columns must be set to [DEFAULT](/docs/en/sql-reference/statements/create/table.md/#default) or [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized).

**Examples**

Query:

``` sql
SET allow_experimental_object_type = 1;
CREATE TABLE json_as_object (json Object('json')) ENGINE = Memory;
INSERT INTO json_as_object (json) FORMAT JSONAsObject {"foo":{"bar":{"x":"y"},"baz":1}},{},{"any json structure":1}
SELECT * FROM json_as_object FORMAT JSONEachRow;
```

Result:

``` response
{"json":{"any json structure":0,"foo":{"bar":{"x":"y"},"baz":1}}}
{"json":{"any json structure":0,"foo":{"bar":{"x":""},"baz":0}}}
{"json":{"any json structure":1,"foo":{"bar":{"x":""},"baz":0}}}
```

**An array of JSON objects**

Query:

``` sql
SET allow_experimental_object_type = 1;
CREATE TABLE json_square_brackets (field Object('json')) ENGINE = Memory;
INSERT INTO json_square_brackets FORMAT JSONAsObject [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}];

SELECT * FROM json_square_brackets FORMAT JSONEachRow;
```

Result:

```response
{"field":{"id":1,"name":"name1"}}
{"field":{"id":2,"name":"name2"}}
```

**Columns with default values**

```sql
SET allow_experimental_object_type = 1;
CREATE TABLE json_as_object (json Object('json'), time DateTime MATERIALIZED now()) ENGINE = Memory;
INSERT INTO json_as_object (json) FORMAT JSONAsObject {"foo":{"bar":{"x":"y"},"baz":1}};
INSERT INTO json_as_object (json) FORMAT JSONAsObject {};
INSERT INTO json_as_object (json) FORMAT JSONAsObject {"any json structure":1}
SELECT * FROM json_as_object FORMAT JSONEachRow
```

```response
{"json":{"any json structure":0,"foo":{"bar":{"x":"y"},"baz":1}},"time":"2024-07-25 17:02:45"}
{"json":{"any json structure":0,"foo":{"bar":{"x":""},"baz":0}},"time":"2024-07-25 17:02:47"}
{"json":{"any json structure":1,"foo":{"bar":{"x":""},"baz":0}},"time":"2024-07-25 17:02:50"}
```

## JSONCompact {#jsoncompact}

Differs from JSON only in that data rows are output in arrays, not in objects.
@@ -103,8 +103,6 @@ Default: 2

The policy on how to perform a scheduling for background merges and mutations. Possible values are: `round_robin` and `shortest_task_first`.

-## background_merges_mutations_scheduling_policy
-
Algorithm used to select the next merge or mutation to be executed by the background thread pool. The policy may be changed at runtime without a server restart.
Could be applied from the `default` profile for backward compatibility.
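The policy is a server-level setting, so it can be checked without a restart. A minimal sketch, assuming your version exposes it in the `system.server_settings` table:

```sql
-- Inspect the currently active scheduling policy (assumes system.server_settings lists it)
SELECT name, value, changed
FROM system.server_settings
WHERE name = 'background_merges_mutations_scheduling_policy';
```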
@@ -119,11 +119,6 @@ Minimum size of blocks of uncompressed data required for compression when writin

You can also specify this setting in the global settings (see [min_compress_block_size](/docs/en/operations/settings/settings.md/#min-compress-block-size) setting).
The value specified when the table is created overrides the global value for this setting.

-## max_partitions_to_read
-
-Limits the maximum number of partitions that can be accessed in one query.
-You can also specify setting [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) in the global setting.
-
## max_suspicious_broken_parts

If the number of broken parts in a single partition exceeds the `max_suspicious_broken_parts` value, automatic deletion is denied.
@@ -691,6 +686,8 @@ Possible values:

Default value: -1 (unlimited).

You can also specify a query complexity setting [max_partitions_to_read](query-complexity#max-partitions-to-read) at a query / session / profile level.

## min_age_to_force_merge_seconds {#min_age_to_force_merge_seconds}

Merge parts if every part in the range is older than the value of `min_age_to_force_merge_seconds`.
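A sketch of how the table-level cap and the query-level override interact; the `events` table and its columns are hypothetical:

```sql
-- Hypothetical monthly-partitioned table with a table-level cap of 10 partitions per query
CREATE TABLE events
(
    d Date,
    x UInt64
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)
ORDER BY x
SETTINGS max_partitions_to_read = 10;

-- A single query can override the table-level cap with the query-level setting
SELECT count() FROM events SETTINGS max_partitions_to_read = 100;
```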
@@ -188,7 +188,7 @@ If you set `timeout_before_checking_execution_speed `to 0, ClickHouse will use c

What to do if the query is run longer than `max_execution_time` or the estimated running time is longer than `max_estimated_execution_time`: `throw` or `break`. By default, `throw`.

-# max_execution_time_leaf
+## max_execution_time_leaf

Similar semantics to `max_execution_time`, but only applied on leaf nodes for distributed or remote queries.
@@ -204,7 +204,7 @@ We can use `max_execution_time_leaf` as the query settings:

SELECT count() FROM cluster(cluster, view(SELECT * FROM t)) SETTINGS max_execution_time_leaf = 10;
```

-# timeout_overflow_mode_leaf
+## timeout_overflow_mode_leaf

What to do when the query on a leaf node runs longer than `max_execution_time_leaf`: `throw` or `break`. By default, `throw`.
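A sketch of how the two leaf settings combine, reusing the hypothetical `cluster` and table `t` from the example above:

```sql
-- Stop leaf queries softly after 10 seconds instead of failing the whole distributed query
SELECT count()
FROM cluster(cluster, view(SELECT * FROM t))
SETTINGS max_execution_time_leaf = 10, timeout_overflow_mode_leaf = 'break';
```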
@@ -426,3 +426,17 @@ Example:

```

Default value: 0 (Infinite count of simultaneous sessions).

## max_partitions_to_read {#max-partitions-to-read}

Limits the maximum number of partitions that can be accessed in one query.

The setting value specified when the table is created can be overridden via a query-level setting.

Possible values:

- Any positive integer.

Default value: -1 (unlimited).

You can also specify the MergeTree setting [max_partitions_to_read](merge-tree-settings#max-partitions-to-read) in a table's settings.
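A sketch of using the limit at session level and checking the effective value; it assumes the setting is listed in `system.settings`:

```sql
-- Session-level limit: every subsequent query in this session is capped
SET max_partitions_to_read = 12;

-- Verify the effective value for the current session
SELECT name, value FROM system.settings WHERE name = 'max_partitions_to_read';
```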
@@ -5608,3 +5608,9 @@ Default value: `10000000`.

Minimal size of block to compress in CROSS JOIN. A zero value disables this threshold. The block is compressed when either of the two thresholds (by rows or by bytes) is reached.

Default value: `1GiB`.

## disable_insertion_and_mutation

Disable all inserts and mutations (ALTER TABLE UPDATE / ALTER TABLE DELETE / ALTER TABLE DROP PARTITION). Setting this to true lets the node focus on read queries.

Default value: `false`.
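Since this is a server-level switch rather than a query setting, a quick way to check it is via the server settings table. A sketch, assuming the setting is exposed in `system.server_settings`:

```sql
-- Check whether this node currently rejects inserts and mutations
SELECT name, value
FROM system.server_settings
WHERE name = 'disable_insertion_and_mutation';
```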
@@ -0,0 +1,90 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupconcat
sidebar_position: 363
sidebar_label: groupConcat
title: groupConcat
---

Calculates a concatenated string from a group of strings, optionally separated by a delimiter, and optionally limited by a maximum number of elements.

**Syntax**

``` sql
groupConcat(expression [, delimiter] [, limit]);
```

**Arguments**

- `expression` — The expression or column name that outputs strings to be concatenated.
- `delimiter` — A [string](../../../sql-reference/data-types/string.md) that will be used to separate concatenated values. This parameter is optional and defaults to an empty string if not specified.
- `limit` — A positive [integer](../../../sql-reference/data-types/int-uint.md) specifying the maximum number of elements to concatenate. If more elements are present, excess elements are ignored. This parameter is optional.

:::note
If delimiter is specified without limit, it must be the first parameter following the expression. If both delimiter and limit are specified, delimiter must precede limit.
:::

**Returned value**

- Returns a [string](../../../sql-reference/data-types/string.md) consisting of the concatenated values of the column or expression. If the group has no elements or only null elements, and the function does not specify a handling for only null values, the result is a nullable string with a null value.

**Examples**

Input table:

``` text
┌─id─┬─name─┐
│  1 │ John │
│  2 │ Jane │
│  3 │ Bob  │
└────┴──────┘
```

1. Basic usage without a delimiter:

Query:

``` sql
SELECT groupConcat(Name) FROM Employees;
```

Result:

``` text
JohnJaneBob
```

This concatenates all names into one continuous string without any separator.

2. Using a comma as a delimiter:

Query:

``` sql
SELECT groupConcat(Name, ', ') FROM Employees;
```

Result:

``` text
John, Jane, Bob
```

This output shows the names separated by a comma followed by a space.

3. Limiting the number of concatenated elements:

Query:

``` sql
SELECT groupConcat(Name, ', ', 2) FROM Employees;
```

Result:

``` text
John, Jane
```

This query limits the output to the first two names, even though there are more names in the table.
@@ -346,7 +346,9 @@ Result:
## materialize

Turns a constant into a full column containing a single value.
-Full columns and constants are represented differently in memory. Functions usually execute different code for normal and constant arguments, although the result should typically be the same. This function can be used to debug this behavior.
+Full columns and constants are represented differently in memory.
+Functions usually execute different code for normal and constant arguments, although the result should typically be the same.
+This function can be used to debug this behavior.

**Syntax**

@@ -354,6 +356,34 @@ Full columns and constants are represented differently in memory. Functions usua
materialize(x)
```

**Parameters**

- `x` — A constant. [Constant](../functions/index.md/#constants).

**Returned value**

- A column containing a single value `x`.

**Example**

In the example below, the `countMatches` function expects a constant second argument.
This behaviour can be debugged by using the `materialize` function to turn a constant into a full column,
verifying that the function throws an error for a non-constant argument.

Query:

```sql
SELECT countMatches('foobarfoo', 'foo');
SELECT countMatches('foobarfoo', materialize('foo'));
```

Result:

```response
2
Code: 44. DB::Exception: Received from localhost:9000. DB::Exception: Illegal type of argument #2 'pattern' of function countMatches, expected constant String, got String
```

## ignore

Accepts any arguments, including `NULL` and does nothing. Always returns 0.
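A complementary way to observe the difference is to dump the column representation directly. A sketch using the `dumpColumnStructure` helper function; the exact output format may vary between versions:

```sql
-- A bare literal is a constant column; materialize() turns it into a full column
SELECT dumpColumnStructure(1), dumpColumnStructure(materialize(1));
```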
@@ -2102,14 +2132,14 @@ Result:
└─────────────────┘
```

-## filesystemFree
+## filesystemUnreserved

-Returns the total amount of the free space on the filesystem hosting the database persistence. See also `filesystemAvailable`
+Returns the total amount of the free space on the filesystem hosting the database persistence (previously `filesystemFree`). See also [`filesystemAvailable`](#filesystemavailable).

**Syntax**

```sql
-filesystemFree()
+filesystemUnreserved()
```

**Returned value**

@@ -2121,7 +2151,7 @@ filesystemFree()
Query:

```sql
-SELECT formatReadableSize(filesystemFree()) AS "Free space";
+SELECT formatReadableSize(filesystemUnreserved()) AS "Free space";
```

Result:
@@ -2449,11 +2479,11 @@ As you can see, `runningAccumulate` merges states for each group of rows separat

## joinGet

-The function lets you extract data from the table the same way as from a [dictionary](../../sql-reference/dictionaries/index.md).
+The function lets you extract data from the table the same way as from a [dictionary](../../sql-reference/dictionaries/index.md). Gets the data from [Join](../../engines/table-engines/special/join.md#creating-a-table) tables using the specified join key.

-Gets the data from [Join](../../engines/table-engines/special/join.md#creating-a-table) tables using the specified join key.
-
:::note
Only supports tables created with the `ENGINE = Join(ANY, LEFT, <join_keys>)` statement.
:::

**Syntax**

@@ -2463,26 +2493,32 @@ joinGet(join_storage_table_name, `value_column`, join_keys)

**Arguments**

-- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed. The identifier is searched in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example.
+- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed.
- `value_column` — name of the column of the table that contains required data.
- `join_keys` — list of keys.

:::note
The identifier is searched for in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example.
:::

**Returned value**

-Returns a list of values corresponded to list of keys.
+- Returns a list of values corresponding to the list of keys.

-If certain does not exist in source table then `0` or `null` will be returned based on [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) setting.
-
:::note
If a certain key does not exist in the source table, then `0` or `null` will be returned, based on the [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) setting during table creation.
More info about `join_use_nulls` in [Join operation](../../engines/table-engines/special/join.md).
:::

**Example**

Input table:

```sql
-CREATE DATABASE db_test
+CREATE DATABASE db_test;
-CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1
+CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id);
-INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13)
+INSERT INTO db_test.id_val VALUES (1, 11)(2, 12)(4, 13);
SELECT * FROM db_test.id_val;
```

```text
@@ -2496,18 +2532,116 @@ INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13)
Query:

```sql
-SELECT joinGet(db_test.id_val, 'val', toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1
+SELECT number, joinGet(db_test.id_val, 'val', toUInt32(number)) FROM numbers(4);
```

Result:

```text
-┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┐
-│                                                0 │
-│                                               11 │
-│                                               12 │
-│                                                0 │
-└──────────────────────────────────────────────────┘
+   ┌─number─┬─joinGet('db_test.id_val', 'val', toUInt32(number))─┐
+1. │      0 │                                                  0 │
+2. │      1 │                                                 11 │
+3. │      2 │                                                 12 │
+4. │      3 │                                                  0 │
+   └────────┴────────────────────────────────────────────────────┘
```

Setting `join_use_nulls` can be used during table creation to change the behaviour of what gets returned if no key exists in the source table.

```sql
CREATE DATABASE db_test;
CREATE TABLE db_test.id_val_nulls(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls=1;
INSERT INTO db_test.id_val_nulls VALUES (1, 11)(2, 12)(4, 13);
SELECT * FROM db_test.id_val_nulls;
```

```text
┌─id─┬─val─┐
│  4 │  13 │
│  2 │  12 │
│  1 │  11 │
└────┴─────┘
```

Query:

```sql
SELECT number, joinGet(db_test.id_val_nulls, 'val', toUInt32(number)) FROM numbers(4);
```

Result:

```text
   ┌─number─┬─joinGet('db_test.id_val_nulls', 'val', toUInt32(number))─┐
1. │      0 │                                                      ᴺᵁᴸᴸ │
2. │      1 │                                                        11 │
3. │      2 │                                                        12 │
4. │      3 │                                                      ᴺᵁᴸᴸ │
   └────────┴───────────────────────────────────────────────────────────┘
```

## joinGetOrNull

Like [joinGet](#joinget) but returns `NULL` when the key is missing instead of returning the default value.

**Syntax**

```sql
joinGetOrNull(join_storage_table_name, `value_column`, join_keys)
```

**Arguments**

- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed.
- `value_column` — name of the column of the table that contains required data.
- `join_keys` — list of keys.

:::note
The identifier is searched for in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example.
:::

**Returned value**

- Returns a list of values corresponding to the list of keys.

:::note
If a certain key does not exist in the source table, then `NULL` is returned for that key.
:::

**Example**

Input table:

```sql
CREATE DATABASE db_test;
CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id);
INSERT INTO db_test.id_val VALUES (1, 11)(2, 12)(4, 13);
SELECT * FROM db_test.id_val;
```

```text
┌─id─┬─val─┐
│  4 │  13 │
│  2 │  12 │
│  1 │  11 │
└────┴─────┘
```

Query:

```sql
SELECT number, joinGetOrNull(db_test.id_val, 'val', toUInt32(number)) FROM numbers(4);
```

Result:

```text
   ┌─number─┬─joinGetOrNull('db_test.id_val', 'val', toUInt32(number))─┐
1. │      0 │                                                      ᴺᵁᴸᴸ │
2. │      1 │                                                        11 │
3. │      2 │                                                        12 │
4. │      3 │                                                      ᴺᵁᴸᴸ │
   └────────┴───────────────────────────────────────────────────────────┘
```

## catboostEvaluate
@@ -223,3 +223,28 @@ SELECT translateUTF8('Münchener Straße', 'üß', 'us') AS res;
│ Munchener Strase │
└──────────────────┘
```

## printf

The `printf` function formats the given string with the values (strings, integers, floating-points etc.) listed in the arguments, similar to the `printf` function in C++. The format string can contain format specifiers starting with the `%` character. Anything outside a `%` format specifier is treated as literal text and copied verbatim into the output. A literal `%` character can be escaped as `%%`.

**Syntax**

``` sql
printf(format, arg1, arg2, ...)
```

**Example**

Query:

``` sql
SELECT printf('%%%s %s %d', 'Hello', 'World', 2024);
```

Result:

``` response
┌─printf('%%%s %s %d', 'Hello', 'World', 2024)─┐
│ %Hello World 2024                            │
└──────────────────────────────────────────────┘
```
@@ -150,15 +150,15 @@ A case insensitive invariant of [position](#position).

Query:

``` sql
-SELECT position('Hello, world!', 'hello');
+SELECT positionCaseInsensitive('Hello, world!', 'hello');
```

Result:

``` text
-┌─position('Hello, world!', 'hello')─┐
-│                                  0 │
-└────────────────────────────────────┘
+┌─positionCaseInsensitive('Hello, world!', 'hello')─┐
+│                                                  1 │
+└───────────────────────────────────────────────────┘
```

## positionUTF8
@@ -43,7 +43,7 @@ Result:

## mapFromArrays

-Creates a map from an array of keys and an array of values.
+Creates a map from an array or map of keys and an array or map of values.

The function is a convenient alternative to syntax `CAST([...], 'Map(key_type, value_type)')`.
For example, instead of writing

@@ -62,8 +62,8 @@ Alias: `MAP_FROM_ARRAYS(keys, values)`

**Arguments**

-- `keys` — Array of keys to create the map from. [Array(T)](../data-types/array.md) where `T` can be any type supported by [Map](../data-types/map.md) as key type.
+- `keys` — Array or map of keys to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md). If `keys` is an array, we accept `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as its type as long as it doesn't contain NULL values.
-- `values` — Array or map of values to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md).
+- `values` — Array or map of values to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md).

**Returned value**

@@ -99,6 +99,18 @@ Result:

└───────────────────────────────────────────────────────┘
```

```sql
SELECT mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3])
```

Result:

```
┌─mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3])─┐
│ {('a',1):1,('b',2):2,('c',3):3}                       │
└───────────────────────────────────────────────────────┘
```

## extractKeyValuePairs

Converts a string of key-value pairs to a [Map(String, String)](../data-types/map.md).
File diff suppressed because it is too large
@@ -36,7 +36,7 @@ These actions are described in detail below.

ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after | FIRST]
```

-Adds a new column to the table with the specified `name`, `type`, [`codec`](../create/table.md/#codecs) and `default_expr` (see the section [Default expressions](/docs/en/sql-reference/statements/create/table.md/#create-default-values)).
+Adds a new column to the table with the specified `name`, `type`, [`codec`](../create/table.md/#column_compression_codec) and `default_expr` (see the section [Default expressions](/docs/en/sql-reference/statements/create/table.md/#create-default-values)).

If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. If you want to add a column to the beginning of the table, use the `FIRST` clause. Otherwise, the column is added to the end of the table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions.
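A short sketch pulling these clauses together; the `visits` table and its columns are hypothetical:

```sql
-- Idempotently add a column right after an existing one; FIRST would put it at the front instead
ALTER TABLE visits
    ADD COLUMN IF NOT EXISTS browser String DEFAULT '' AFTER user_id;
```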
@@ -155,7 +155,7 @@ This query changes the `name` column properties:

- Column-level Settings

-For examples of modifying column compression CODECs, see [Column Compression Codecs](../create/table.md/#codecs).
+For examples of modifying column compression CODECs, see [Column Compression Codecs](../create/table.md/#column_compression_codec).

For examples of modifying column TTL, see [Column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl).
@@ -21,7 +21,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
    name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr2] [COMMENT 'comment for column'] [compression_codec] [TTL expr2],
    ...
) ENGINE = engine
-COMMENT 'comment for table'
+[COMMENT 'comment for table']
```

Creates a table named `table_name` in the `db` database or the current database if `db` is not set, with the structure specified in brackets and the `engine` engine.
@@ -626,11 +626,6 @@ SELECT * FROM base.t1;

You can add a comment to the table when creating it.

-:::note
-The comment clause is supported by all table engines except [Kafka](../../../engines/table-engines/integrations/kafka.md), [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) and [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md).
-:::

**Syntax**

``` sql
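A minimal sketch of creating a commented table and reading the comment back; the table name is hypothetical:

```sql
CREATE TABLE t_commented
(
    x UInt32
)
ENGINE = Memory
COMMENT 'demo table with a comment';

-- Table comments are visible in system.tables
SELECT name, comment FROM system.tables WHERE name = 't_commented';
```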
@ -16,6 +16,7 @@ Syntax:
|
|||||||
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name]
|
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name]
|
||||||
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
|
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
|
||||||
AS SELECT ...
|
AS SELECT ...
|
||||||
|
[COMMENT 'comment']
|
||||||
```
|
```
|
||||||
|
|
||||||
Normal views do not store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
|
Normal views do not store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
|
||||||
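A hedged example of the added `[COMMENT 'comment']` clause on a normal view, following the grammar above (the source table `t1` is assumed to exist):

```sql
CREATE VIEW v1 AS SELECT x FROM t1
COMMENT 'A saved query over t1';
```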
@@ -57,6 +58,7 @@ SELECT * FROM view(column1=value1, column2=value2 ...)
 CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE]
 [DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
 AS SELECT ...
+[COMMENT 'comment']
 ```
 
 :::tip
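The same clause applied to a materialized view, as a sketch (view, table, and column names are hypothetical):

```sql
CREATE MATERIALIZED VIEW mv1
ENGINE = MergeTree ORDER BY x
AS SELECT x FROM t1
COMMENT 'Materialized copy of t1.x';
```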
@@ -161,6 +163,7 @@ RANDOMIZE FOR interval
 DEPENDS ON [db.]name [, [db.]name [, ...]]
 [TO[db.]name] [(columns)] [ENGINE = engine] [EMPTY]
 AS SELECT ...
+[COMMENT 'comment']
 ```
 
 where `interval` is a sequence of simple intervals:
 ```sql
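A sketch combining a refresh schedule with the new comment clause (the schedule, names, and query are illustrative only):

```sql
CREATE MATERIALIZED VIEW daily_stats
REFRESH EVERY 1 DAY OFFSET 2 HOUR
ENGINE = MergeTree ORDER BY day
AS SELECT toDate(ts) AS day, count() AS hits FROM events GROUP BY day
COMMENT 'Refreshed once a day, two hours past midnight';
```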
@@ -267,7 +270,10 @@ This is an experimental feature that may change in backwards-incompatible ways i
 :::
 
 ``` sql
-CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [INNER ENGINE engine] [ENGINE engine] [WATERMARK strategy] [ALLOWED_LATENESS interval_function] [POPULATE] AS SELECT ... GROUP BY time_window_function
+CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [INNER ENGINE engine] [ENGINE engine] [WATERMARK strategy] [ALLOWED_LATENESS interval_function] [POPULATE]
+AS SELECT ...
+GROUP BY time_window_function
+[COMMENT 'comment']
 ```
 
 Window view can aggregate data by time window and output the results when the window is ready to fire. It stores the partial aggregation results in an inner (or specified) table to reduce latency and can push the processing result to a specified table or push notifications using the WATCH query.
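A hedged sketch of the reformatted statement (the source table `events`, the sink `dst`, and the ten-second tumbling window are assumptions, not part of the diff):

```sql
CREATE WINDOW VIEW wv TO dst
WATERMARK=ASCENDING
AS SELECT count(id) AS cnt, tumbleStart(w_id) AS w_start
FROM events
GROUP BY tumble(timestamp, INTERVAL '10' SECOND) AS w_id
COMMENT 'Per-10-second event counts';
```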
@@ -12,6 +12,8 @@ The [rank](./rank.md) function provides the same behaviour, but with gaps in ran
 **Syntax**
 
+Alias: `denseRank` (case-sensitive)
+
 ```sql
 dense_rank (column_name)
   OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
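A short usage sketch of the function and its new alias (the `salaries` table is hypothetical):

```sql
SELECT player,
       salary,
       dense_rank() OVER (ORDER BY salary DESC) AS dr,
       denseRank()  OVER (ORDER BY salary DESC) AS dr_alias
FROM salaries;
```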
@@ -23,8 +23,8 @@ ClickHouse supports the standard grammar for defining windows and window functio
 | `INTERVAL` syntax for `DateTime` `RANGE OFFSET` frame | ❌ (specify the number of seconds instead (`RANGE` works with any numeric type).) |
 | `GROUPS` frame | ❌ |
 | Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | ✅ (All aggregate functions are supported) |
-| `rank()`, `dense_rank()`, `row_number()` | ✅ |
-| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)`|
+| `rank()`, `dense_rank()`, `row_number()` | ✅ <br/>Alias: `denseRank()` |
+| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)` <br/>Alias: `percentRank()`|
 | `lag/lead(value, offset)` | ❌ <br/> You can use one of the following workarounds:<br/> 1) `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead` <br/> 2) `lagInFrame/leadInFrame`, which are analogous, but respect the window frame. To get behavior identical to `lag/lead`, use `rows between unbounded preceding and unbounded following` |
 | ntile(buckets) | ✅ <br/> Specify window like, (partition by x order by y rows between unbounded preceding and unrounded following). |
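A sketch of workaround 1 from the table above, emulating `lag(value, 1)` with `any()` over a one-row frame (table and columns are hypothetical):

```sql
SELECT t,
       value,
       any(value) OVER (ORDER BY t ASC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS lag_1
FROM observations;
```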
@@ -23,7 +23,7 @@ For more detail on window function syntax see: [Window Functions - Syntax](./ind
 **Parameters**
 - `x` — Column name.
 - `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
-- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).
+- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - default value of column type when omitted).
 
 **Returned value**
 
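Assuming this hunk belongs to the `lagInFrame` page, a sketch showing the `default` parameter over a full-window frame (data is hypothetical):

```sql
SELECT t,
       value,
       lagInFrame(value, 1, 0) OVER (ORDER BY t ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS prev_value
FROM observations;
```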
@@ -23,7 +23,7 @@ For more detail on window function syntax see: [Window Functions - Syntax](./ind
 **Parameters**
 - `x` — Column name.
 - `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
-- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).
+- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - default value of column type when omitted).
 
 **Returned value**
 
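And the mirror-image sketch for `leadInFrame`, under the same assumptions:

```sql
SELECT t,
       value,
       leadInFrame(value, 1, 0) OVER (ORDER BY t ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_value
FROM observations;
```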
@@ -209,8 +209,8 @@ std::vector<String> Client::loadWarningMessages()
         {} /* query_parameters */,
         "" /* query_id */,
         QueryProcessingStage::Complete,
-        &global_context->getSettingsRef(),
-        &global_context->getClientInfo(), false, {});
+        &client_context->getSettingsRef(),
+        &client_context->getClientInfo(), false, {});
     while (true)
     {
         Packet packet = connection->receivePacket();
@@ -306,9 +306,6 @@ void Client::initialize(Poco::Util::Application & self)
     if (env_password && !config().has("password"))
         config().setString("password", env_password);
 
-    // global_context->setApplicationType(Context::ApplicationType::CLIENT);
-    global_context->setQueryParameters(query_parameters);
-
     /// settings and limits could be specified in config file, but passed settings has higher priority
     for (const auto & setting : global_context->getSettingsRef().allUnchanged())
     {
@@ -382,7 +379,7 @@ try
     showWarnings();
 
     /// Set user password complexity rules
-    auto & access_control = global_context->getAccessControl();
+    auto & access_control = client_context->getAccessControl();
     access_control.setPasswordComplexityRules(connection->getPasswordComplexityRules());
 
     if (is_interactive && !delayed_interactive)
@@ -459,7 +456,7 @@ void Client::connect()
         << connection_parameters.host << ":" << connection_parameters.port
         << (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") << "." << std::endl;
 
-    connection = Connection::createConnection(connection_parameters, global_context);
+    connection = Connection::createConnection(connection_parameters, client_context);
 
     if (max_client_network_bandwidth)
     {
@@ -528,7 +525,7 @@ void Client::connect()
         }
     }
 
-    if (!global_context->getSettingsRef().use_client_time_zone)
+    if (!client_context->getSettingsRef().use_client_time_zone)
     {
         const auto & time_zone = connection->getServerTimezone(connection_parameters.timeouts);
         if (!time_zone.empty())
@@ -611,7 +608,7 @@ void Client::printChangedSettings() const
         }
     };
 
-    print_changes(global_context->getSettingsRef().changes(), "settings");
+    print_changes(client_context->getSettingsRef().changes(), "settings");
     print_changes(cmd_merge_tree_settings.changes(), "MergeTree settings");
 }
 
@@ -709,7 +706,7 @@ bool Client::processWithFuzzing(const String & full_query)
     {
         const char * begin = full_query.data();
         orig_ast = parseQuery(begin, begin + full_query.size(),
-            global_context->getSettingsRef(),
+            client_context->getSettingsRef(),
             /*allow_multi_statements=*/ true);
     }
     catch (const Exception & e)
@@ -733,7 +730,7 @@ bool Client::processWithFuzzing(const String & full_query)
     }
 
     // Kusto is not a subject for fuzzing (yet)
-    if (global_context->getSettingsRef().dialect == DB::Dialect::kusto)
+    if (client_context->getSettingsRef().dialect == DB::Dialect::kusto)
     {
         return true;
     }
@@ -1138,8 +1135,6 @@ void Client::processOptions(const OptionsDescription & options_description,
 
     if ((query_fuzzer_runs = options["query-fuzzer-runs"].as<int>()))
     {
-        // Fuzzer implies multiquery.
-        config().setBool("multiquery", true);
         // Ignore errors in parsing queries.
         config().setBool("ignore-error", true);
         ignore_error = true;
@@ -1147,8 +1142,6 @@ void Client::processOptions(const OptionsDescription & options_description,
 
     if ((create_query_fuzzer_runs = options["create-query-fuzzer-runs"].as<int>()))
     {
-        // Fuzzer implies multiquery.
-        config().setBool("multiquery", true);
         // Ignore errors in parsing queries.
         config().setBool("ignore-error", true);
 
@@ -1166,6 +1159,11 @@ void Client::processOptions(const OptionsDescription & options_description,
 
     if (options.count("opentelemetry-tracestate"))
         global_context->getClientTraceContext().tracestate = options["opentelemetry-tracestate"].as<std::string>();
+
+    /// In case of clickhouse-client the `client_context` can be just an alias for the `global_context`.
+    /// (There is no need to copy the context because clickhouse-client has no background tasks so it won't use that context in parallel.)
+    client_context = global_context;
+    initClientContext();
 }
 
 
@@ -1199,17 +1197,9 @@ void Client::processConfig()
     }
     print_stack_trace = config().getBool("stacktrace", false);
 
-    if (config().has("multiquery"))
-        is_multiquery = true;
-
     pager = config().getString("pager", "");
 
     setDefaultFormatsAndCompressionFromConfiguration();
 
-    global_context->setClientName(std::string(DEFAULT_CLIENT_NAME));
-    global_context->setQueryKindInitial();
-    global_context->setQuotaClientKey(config().getString("quota_key", ""));
-    global_context->setQueryKind(query_kind);
 }
 
 
@@ -1362,13 +1352,6 @@ void Client::readArguments(
             allow_repeated_settings = true;
         else if (arg == "--allow_merge_tree_settings")
             allow_merge_tree_settings = true;
-        else if (arg == "--multiquery" && (arg_num + 1) < argc && !std::string_view(argv[arg_num + 1]).starts_with('-'))
-        {
-            /// Transform the abbreviated syntax '--multiquery <SQL>' into the full syntax '--multiquery -q <SQL>'
-            ++arg_num;
-            arg = argv[arg_num];
-            addMultiquery(arg, common_arguments);
-        }
         else if (arg == "--password" && ((arg_num + 1) >= argc || std::string_view(argv[arg_num + 1]).starts_with('-')))
         {
             common_arguments.emplace_back(arg);
@@ -1,14 +1,16 @@
 #pragma once
 
-#include <Client/ClientBase.h>
+#include <Client/ClientApplicationBase.h>
 
 
 namespace DB
 {
 
-class Client : public ClientBase
+class Client : public ClientApplicationBase
 {
 public:
+    using Arguments = ClientApplicationBase::Arguments;
+
     Client() = default;
 
     void initialize(Poco::Util::Application & self) override;
@@ -16,7 +18,6 @@ public:
     int main(const std::vector<String> & /*args*/) override;
 
 protected:
-
     Poco::Util::LayeredConfiguration & getClientConfiguration() override;
 
     bool processWithFuzzing(const String & full_query) override;
@@ -19,6 +19,7 @@
 #include <base/getMemoryAmount.h>
 #include <base/scope_guard.h>
 #include <base/safeExit.h>
+#include <base/Numa.h>
 #include <Poco/Net/NetException.h>
 #include <Poco/Net/TCPServerParams.h>
 #include <Poco/Net/TCPServer.h>
@@ -311,6 +312,12 @@ try
 
     MainThreadStatus::getInstance();
 
+    if (auto total_numa_memory = getNumaNodesTotalMemory(); total_numa_memory.has_value())
+    {
+        LOG_INFO(
+            log, "Keeper is bound to a subset of NUMA nodes. Total memory of all available nodes: {}", ReadableSize(*total_numa_memory));
+    }
+
 #if !defined(NDEBUG) || !defined(__OPTIMIZE__)
     LOG_WARNING(log, "Keeper was built in debug mode. It will work slowly.");
 #endif
@@ -1,6 +1,7 @@
 #include "LocalServer.h"
 
 #include <sys/resource.h>
+#include <Common/Config/getLocalConfigPath.h>
 #include <Common/logger_useful.h>
 #include <Common/formatReadable.h>
 #include <Core/UUID.h>
@@ -80,7 +81,7 @@ namespace ErrorCodes
 
 void applySettingsOverridesForLocal(ContextMutablePtr context)
 {
-    Settings settings = context->getSettings();
+    Settings settings = context->getSettingsCopy();
 
     settings.allow_introspection_functions = true;
     settings.storage_file_read_method = LocalFSReadMethod::mmap;
@@ -127,10 +128,21 @@ void LocalServer::initialize(Poco::Util::Application & self)
 {
     Poco::Util::Application::initialize(self);
 
+    const char * home_path_cstr = getenv("HOME"); // NOLINT(concurrency-mt-unsafe)
+    if (home_path_cstr)
+        home_path = home_path_cstr;
+
     /// Load config files if exists
-    if (getClientConfiguration().has("config-file") || fs::exists("config.xml"))
+    std::string config_path;
+    if (getClientConfiguration().has("config-file"))
+        config_path = getClientConfiguration().getString("config-file");
+    else if (config_path.empty() && fs::exists("config.xml"))
+        config_path = "config.xml";
+    else if (config_path.empty())
+        config_path = getLocalConfigPath(home_path).value_or("");
+
+    if (fs::exists(config_path))
     {
-        const auto config_path = getClientConfiguration().getString("config-file", "config.xml");
         ConfigProcessor config_processor(config_path, false, true);
         ConfigProcessor::setConfigPath(fs::path(config_path).parent_path());
         auto loaded_config = config_processor.loadConfig();
@@ -184,6 +196,11 @@ void LocalServer::initialize(Poco::Util::Application & self)
         cleanup_threads,
         0, // We don't need any threads one all the parts will be deleted
         cleanup_threads);
 
+    getDatabaseCatalogDropTablesThreadPool().initialize(
+        server_settings.database_catalog_drop_table_concurrency,
+        0, // We don't need any threads if there are no DROP queries.
+        server_settings.database_catalog_drop_table_concurrency);
 }
 
 
@@ -295,6 +312,8 @@ void LocalServer::cleanup()
         if (suggest)
             suggest.reset();
 
+        client_context.reset();
+
         if (global_context)
         {
             global_context->shutdown();
@@ -436,7 +455,7 @@ void LocalServer::connect()
         in = input.get();
     }
     connection = LocalConnection::createConnection(
-        connection_parameters, global_context, in, need_render_progress, need_render_profile_events, server_display_name);
+        connection_parameters, client_context, in, need_render_progress, need_render_profile_events, server_display_name);
 }
 
 
@@ -497,8 +516,6 @@ try
     initTTYBuffer(toProgressOption(getClientConfiguration().getString("progress", "default")));
     ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
 
-    applyCmdSettings(global_context);
-
     /// try to load user defined executable functions, throw on error and die
     try
     {
@@ -510,6 +527,11 @@ try
         throw;
     }
 
+    /// Must be called after we stopped initializing the global context and changing its settings.
+    /// After this point the global context must be stayed almost unchanged till shutdown,
+    /// and all necessary changes must be made to the client context instead.
+    createClientContext();
+
     if (is_interactive)
     {
         clearTerminal();
@@ -564,9 +586,6 @@ void LocalServer::processConfig()
     if (!queries.empty() && getClientConfiguration().has("queries-file"))
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Options '--query' and '--queries-file' cannot be specified at the same time");
 
-    if (getClientConfiguration().has("multiquery"))
-        is_multiquery = true;
-
     pager = getClientConfiguration().getString("pager", "");
 
     delayed_interactive = getClientConfiguration().has("interactive") && (!queries.empty() || getClientConfiguration().has("queries-file"));
@@ -735,6 +754,9 @@ void LocalServer::processConfig()
     /// Load global settings from default_profile and system_profile.
     global_context->setDefaultProfiles(getClientConfiguration());
 
+    /// Command-line parameters can override settings from the default profile.
+    applyCmdSettings(global_context);
+
     /// We load temporary database first, because projections need it.
     DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase();
 
@@ -778,10 +800,6 @@ void LocalServer::processConfig()
 
     server_display_name = getClientConfiguration().getString("display_name", "");
     prompt_by_server_display_name = getClientConfiguration().getRawString("prompt_by_server_display_name.default", ":) ");
-
-    global_context->setQueryKindInitial();
-    global_context->setQueryKind(query_kind);
-    global_context->setQueryParameters(query_parameters);
 }
 
 
@@ -860,6 +878,16 @@ void LocalServer::applyCmdOptions(ContextMutablePtr context)
 }
 
 
+void LocalServer::createClientContext()
+{
+    /// In case of clickhouse-local it's necessary to use a separate context for client-related purposes.
+    /// We can't just change the global context because it is used in background tasks (for example, in merges)
+    /// which don't expect that the global context can suddenly change.
+    client_context = Context::createCopy(global_context);
+    initClientContext();
+}
+
+
 void LocalServer::processOptions(const OptionsDescription &, const CommandLineOptions & options, const std::vector<Arguments> &, const std::vector<Arguments> &)
 {
     if (options.count("table"))
@@ -922,13 +950,6 @@ void LocalServer::readArguments(int argc, char ** argv, Arguments & common_argum
                 query_parameters.emplace(param_continuation.substr(0, equal_pos), param_continuation.substr(equal_pos + 1));
             }
         }
-        else if (arg == "--multiquery" && (arg_num + 1) < argc && !std::string_view(argv[arg_num + 1]).starts_with('-'))
-        {
-            /// Transform the abbreviated syntax '--multiquery <SQL>' into the full syntax '--multiquery -q <SQL>'
-            ++arg_num;
-            arg = argv[arg_num];
-            addMultiquery(arg, common_arguments);
-        }
         else
         {
             common_arguments.emplace_back(arg);
@@ -1,6 +1,6 @@
 #pragma once
 
-#include <Client/ClientBase.h>
+#include <Client/ClientApplicationBase.h>
 #include <Client/LocalConnection.h>
 
 #include <Core/ServerSettings.h>
@@ -21,7 +21,7 @@ namespace DB
 /// Lightweight Application for clickhouse-local
 /// No networking, no extra configs and working directories, no pid and status files, no dictionaries, no logging.
 /// Quiet mode by default
-class LocalServer : public ClientBase, public Loggers
+class LocalServer : public ClientApplicationBase, public Loggers
 {
 public:
     LocalServer() = default;
@@ -31,7 +31,6 @@ public:
     int main(const std::vector<String> & /*args*/) override;
 
 protected:
-
     Poco::Util::LayeredConfiguration & getClientConfiguration() override;
 
     void connect() override;
@@ -50,7 +49,6 @@ protected:
     void processConfig() override;
     void readArguments(int argc, char ** argv, Arguments & common_arguments, std::vector<Arguments> &, std::vector<Arguments> &) override;
 
-
     void updateLoggerLevel(const String & logs_level) override;
 
 private:
@@ -67,6 +65,8 @@ private:
     void applyCmdOptions(ContextMutablePtr context);
     void applyCmdSettings(ContextMutablePtr context);
 
+    void createClientContext();
+
     ServerSettings server_settings;
 
     std::optional<StatusFile> status;
@@ -22,6 +22,7 @@
 #include <base/coverage.h>
 #include <base/getFQDNOrHostName.h>
 #include <base/safeExit.h>
+#include <base/Numa.h>
 #include <Common/PoolId.h>
 #include <Common/MemoryTracker.h>
 #include <Common/ClickHouseRevision.h>
@@ -140,6 +141,7 @@
 # include <azure/core/diagnostics/logger.hpp>
 #endif
 
+
 #include <incbin.h>
 /// A minimal file used when the server is run without installation
 INCBIN(resource_embedded_xml, SOURCE_DIR "/programs/server/embedded.xml");
@@ -754,6 +756,12 @@ try
         setenv("OPENSSL_CONF", config_dir.c_str(), true); /// NOLINT
     }
 
+    if (auto total_numa_memory = getNumaNodesTotalMemory(); total_numa_memory.has_value())
+    {
+        LOG_INFO(
+            log, "ClickHouse is bound to a subset of NUMA nodes. Total memory of all available nodes: {}", ReadableSize(*total_numa_memory));
+    }
+
     registerInterpreters();
     registerFunctions();
     registerAggregateFunctions();
@@ -841,7 +849,7 @@ try
 #endif
 
 #if defined(SANITIZER)
-    LOG_INFO(log, "Query Profiler disabled because they cannot work under sanitizers"
+    LOG_INFO(log, "Query Profiler is disabled because it cannot work under sanitizers"
         " when two different stack unwinding methods will interfere with each other.");
 #endif
 
@@ -1035,6 +1043,11 @@ try
         0, // We don't need any threads once all the tables will be created
         max_database_replicated_create_table_thread_pool_size);
 
+    getDatabaseCatalogDropTablesThreadPool().initialize(
+        server_settings.database_catalog_drop_table_concurrency,
+        0, // We don't need any threads if there are no DROP queries.
+        server_settings.database_catalog_drop_table_concurrency);
+
     /// Initialize global local cache for remote filesystem.
     if (config().has("local_cache_for_remote_fs"))
     {
@@ -1582,6 +1595,8 @@ try
         global_context->setMacros(std::make_unique<Macros>(*config, "macros", log));
         global_context->setExternalAuthenticatorsConfig(*config);
 
+        global_context->setDashboardsConfig(config);
+
         if (global_context->isServerCompletelyStarted())
         {
             /// It does not make sense to reload anything before server has started.
13
programs/server/config.d/backups.xml
Normal file
@@ -0,0 +1,13 @@
+<clickhouse>
+    <storage_configuration>
+        <disks>
+            <backups>
+                <type>local</type>
+                <path>/tmp/backups/</path>
+            </backups>
+        </disks>
+    </storage_configuration>
+    <backups>
+        <allowed_disk>backups</allowed_disk>
+    </backups>
+</clickhouse>
1
programs/server/config.d/enable_keeper_map.xml
Symbolic link
@@ -0,0 +1 @@
+../../../tests/config/config.d/enable_keeper_map.xml
1
programs/server/config.d/session_log.xml
Symbolic link
@@ -0,0 +1 @@
+../../../tests/config/config.d/session_log.xml
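With the `backups` disk configured as above, a hedged usage sketch (database and table names are hypothetical):

```sql
BACKUP TABLE test.hits TO Disk('backups', 'hits.zip');
RESTORE TABLE test.hits FROM Disk('backups', 'hits.zip');
```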
@@ -1130,8 +1130,7 @@
         <flush_interval_milliseconds>7500</flush_interval_milliseconds>
     </query_views_log>
 
-    <!-- Uncomment if use part log.
-         Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).-->
+    <!-- Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads). -->
     <part_log>
         <database>system</database>
         <table>part_log</table>
@@ -1143,9 +1142,9 @@
         <flush_on_crash>false</flush_on_crash>
     </part_log>
 
-    <!-- Uncomment to write text log into table.
-         Text log contains all information from usual server log but stores it in structured and efficient way.
+    <!-- Text log contains all information from usual server log but stores it in structured and efficient way.
          The level of the messages that goes to the table can be limited (<level>), if not specified all messages will go to the table.
+    -->
     <text_log>
         <database>system</database>
         <table>text_log</table>
@@ -1154,9 +1153,8 @@
         <reserved_size_rows>8192</reserved_size_rows>
         <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
         <flush_on_crash>false</flush_on_crash>
-        <level></level>
+        <level>trace</level>
     </text_log>
-    -->
 
     <!-- Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval. -->
     <metric_log>
@@ -1312,6 +1310,31 @@
         <ttl>event_date + INTERVAL 30 DAY</ttl>
     </blob_storage_log>
 
+    <!-- Configure system.dashboards for dashboard.html.
+
+         Could have any query parameters, for which there will be an input on the page.
+         For instance an example from comments have the following:
+         - seconds
+         - rounding
+
+         NOTE: All default dashboards will be overwritten if it was set here. -->
+    <!-- Here is an example without merge() function, to make it work with readonly user -->
+    <!--
+    <dashboards>
+        <dashboard>
+            <dashboard>Overview</dashboard>
+            <title>Queries/second</title>
+            <query>
+                SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_Query)
+                FROM system.metric_log
+                WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32}
+                GROUP BY t
+                ORDER BY t WITH FILL STEP {rounding:UInt32}
+            </query>
+        </dashboard>
+    </dashboards>
+    -->
+
     <!-- <top_level_domains_path>/var/lib/clickhouse/top_level_domains/</top_level_domains_path> -->
     <!-- Custom TLD lists.
          Format: <name>/path/to/file</name>
@@ -17,7 +17,7 @@
     --input-shadow-color: rgba(0, 255, 0, 1);
     --error-color: red;
     --global-error-color: white;
-    --legend-background: rgba(255, 255, 255, 0.75);
+    --legend-background: rgba(255, 255, 0, 0.75);
     --title-color: #666;
     --text-color: black;
     --edit-title-background: #FEE;
@@ -41,7 +41,7 @@
     --moving-shadow-color: rgba(255, 255, 255, 0.25);
     --input-shadow-color: rgba(255, 128, 0, 0.25);
     --error-color: #F66;
-    --legend-background: rgba(255, 255, 255, 0.25);
+    --legend-background: rgba(0, 96, 128, 0.75);
     --title-color: white;
     --text-color: white;
     --edit-title-background: #364f69;
@@ -218,6 +218,7 @@
 
 #chart-params .param {
     width: 6%;
+    font-family: monospace;
 }
 
 input {
@@ -256,6 +257,7 @@
     font-weight: bold;
     user-select: none;
     cursor: pointer;
+    margin-bottom: 1rem;
 }
 
 #run:hover {
@@ -309,7 +311,7 @@
     color: var(--param-text-color);
     display: inline-block;
     box-shadow: 1px 1px 0 var(--shadow-color);
-    margin-bottom: 1rem;
+    margin-bottom: 0.5rem;
 }
 
 input:focus {
@@ -657,6 +659,10 @@ function insertParam(name, value) {
     param_value.value = value;
     param_value.spellcheck = false;
 
+    let setWidth = e => { e.style.width = (e.value.length + 1) + 'ch' };
+    if (value) { setWidth(param_value); }
+    param_value.addEventListener('input', e => setWidth(e.target));
+
     param_wrapper.appendChild(param_name);
     param_wrapper.appendChild(param_value);
     document.getElementById('chart-params').appendChild(param_wrapper);
@@ -945,6 +951,7 @@ function showMassEditor() {
     let editor = document.getElementById('mass-editor-textarea');
     editor.value = JSON.stringify({params: params, queries: queries}, null, 2);
 
+    editor.focus();
     mass_editor_active = true;
 }
 
@@ -1004,14 +1011,14 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend-
     className && legendEl.classList.add(className);
 
     uPlot.assign(legendEl.style, {
-        textAlign: "left",
+        textAlign: "right",
         pointerEvents: "none",
         display: "none",
         position: "absolute",
         left: 0,
         top: 0,
         zIndex: 100,
-        boxShadow: "2px 2px 10px rgba(0,0,0,0.1)",
+        boxShadow: "2px 2px 10px rgba(0, 0, 0, 0.1)",
         ...style
     });
 
@@ -1051,8 +1058,10 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend-
 
     function update(u) {
         let { left, top } = u.cursor;
-        left -= legendEl.clientWidth / 2;
-        top -= legendEl.clientHeight / 2;
+        /// This will make the balloon to the right of the cursor when the cursor is on the left side, and vise-versa,
+        /// avoiding the borders of the chart.
+        left -= legendEl.clientWidth * (left / u.width);
+        top -= legendEl.clientHeight;
         legendEl.style.transform = "translate(" + left + "px, " + top + "px)";
 
         if (multiline) {
@@ -1139,7 +1148,7 @@ async function draw(idx, chart, url_params, query) {
 
     let {reply, error} = await doFetch(query, url_params);
     if (!error) {
-        if (reply.rows.length == 0) {
+        if (reply.rows == 0) {
             error = "Query returned empty result.";
         } else if (reply.meta.length < 2) {
             error = "Query should return at least two columns: unix timestamp and value.";
@@ -1229,14 +1238,53 @@ async function draw(idx, chart, url_params, query) {
 
     let sync = uPlot.sync("sync");
 
-    let axis = {
-        stroke: axes_color,
-        grid: { width: 1 / devicePixelRatio, stroke: grid_color },
-        ticks: { width: 1 / devicePixelRatio, stroke: grid_color }
-    };
-
-    let axes = [axis, axis];
-    let series = [{ label: "x" }];
+    function formatDateTime(t) {
+        return (new Date(t * 1000)).toISOString().replace('T', '\n').replace('.000Z', '');
+    }
+
+    function formatDateTimes(self, ticks) {
+        return ticks.map((t, idx) => {
+            let res = formatDateTime(t);
+            if (idx == 0 || res.substring(0, 10) != formatDateTime(ticks[idx - 1]).substring(0, 10)) {
+                return res;
+            } else {
+                return res.substring(11);
+            }
+        });
+    }
+
+    function formatValue(v) {
+        const a = Math.abs(v);
+        if (a >= 1000000000000000) { return (v / 1000000000000000) + 'P'; }
+        if (a >= 1000000000000) { return (v / 1000000000000) + 'T'; }
+        if (a >= 1000000000) { return (v / 1000000000) + 'G'; }
+        if (a >= 1000000) { return (v / 1000000) + 'M'; }
+        if (a >= 1000) { return (v / 1000) + 'K'; }
+        if (a > 0 && a < 0.001) { return (v * 1000000) + "μ"; }
+        return v;
+    }
+
+    let axis_x = {
+        stroke: axes_color,
+        grid: { width: 1 / devicePixelRatio, stroke: grid_color },
+        ticks: { width: 1 / devicePixelRatio, stroke: grid_color },
+        values: formatDateTimes,
+        space: 80,
+        incrs: [1, 5, 10, 15, 30,
+                60, 60 * 5, 60 * 10, 60 * 15, 60 * 30,
+                3600, 3600 * 2, 3600 * 3, 3600 * 4, 3600 * 6, 3600 * 12,
+                3600 * 24],
+    };
+
+    let axis_y = {
+        stroke: axes_color,
+        grid: { width: 1 / devicePixelRatio, stroke: grid_color },
+        ticks: { width: 1 / devicePixelRatio, stroke: grid_color },
+        values: (self, ticks) => ticks.map(formatValue)
+    };
+
+    let axes = [axis_x, axis_y];
+    let series = [{ label: "time", value: (self, t) => formatDateTime(t) }];
     let data = [reply.data[reply.meta[0].name]];
 
     // Treat every column as series
@@ -1254,9 +1302,10 @@ async function draw(idx, chart, url_params, query) {
     const opts = {
         width: chart.clientWidth,
         height: chart.clientHeight,
+        scales: { x: { time: false } }, /// Because we want to split and format time on our own.
         axes,
         series,
-        padding: [ null, null, null, (Math.round(max_value * 100) / 100).toString().length * 6 - 10 ],
+        padding: [ null, null, null, 3 ],
         plugins: [ legendAsTooltipPlugin() ],
         cursor: {
             sync: {
@@ -39,6 +39,8 @@ disable = '''
     no-else-return,
     global-statement,
     f-string-without-interpolation,
+    consider-using-with,
+    use-maxsplit-arg,
 '''
 
 [tool.pylint.SIMILARITIES]
@@ -224,7 +224,11 @@ void AccessRightsElement::replaceEmptyDatabase(const String & current_database)
 
 String AccessRightsElement::toString() const { return toStringImpl(*this, true); }
 String AccessRightsElement::toStringWithoutOptions() const { return toStringImpl(*this, false); }
+String AccessRightsElement::toStringForAccessTypeSource() const
+{
+    String result{access_flags.toKeywords().front()};
+    return result + " ON *.*";
+}
 
 bool AccessRightsElements::empty() const { return std::all_of(begin(), end(), [](const AccessRightsElement & e) { return e.empty(); }); }
 
@@ -89,6 +89,7 @@ struct AccessRightsElement
     /// Returns a human-readable representation like "GRANT SELECT, UPDATE(x, y) ON db.table".
     String toString() const;
     String toStringWithoutOptions() const;
+    String toStringForAccessTypeSource() const;
 };
 
 
@@ -38,6 +38,24 @@ namespace ErrorCodes
 
 namespace
 {
+    const std::vector<std::tuple<AccessFlags, std::string>> source_and_table_engines = {
+        {AccessType::FILE, "File"},
+        {AccessType::URL, "URL"},
+        {AccessType::REMOTE, "Distributed"},
+        {AccessType::MONGO, "MongoDB"},
+        {AccessType::REDIS, "Redis"},
+        {AccessType::MYSQL, "MySQL"},
+        {AccessType::POSTGRES, "PostgreSQL"},
+        {AccessType::SQLITE, "SQLite"},
+        {AccessType::ODBC, "ODBC"},
+        {AccessType::JDBC, "JDBC"},
+        {AccessType::HDFS, "HDFS"},
+        {AccessType::S3, "S3"},
+        {AccessType::HIVE, "Hive"},
+        {AccessType::AZURE, "AzureBlobStorage"}
+    };
+
+
     AccessRights mixAccessRightsFromUserAndRoles(const User & user, const EnabledRolesInfo & roles_info)
     {
         AccessRights res = user.access;
@@ -206,22 +224,6 @@ namespace
         }
 
         /// There is overlap between AccessType sources and table engines, so the following code avoids user granting twice.
-        static const std::vector<std::tuple<AccessFlags, std::string>> source_and_table_engines = {
-            {AccessType::FILE, "File"},
-            {AccessType::URL, "URL"},
-            {AccessType::REMOTE, "Distributed"},
-            {AccessType::MONGO, "MongoDB"},
-            {AccessType::REDIS, "Redis"},
-            {AccessType::MYSQL, "MySQL"},
-            {AccessType::POSTGRES, "PostgreSQL"},
-            {AccessType::SQLITE, "SQLite"},
-            {AccessType::ODBC, "ODBC"},
-            {AccessType::JDBC, "JDBC"},
-            {AccessType::HDFS, "HDFS"},
-            {AccessType::S3, "S3"},
-            {AccessType::HIVE, "Hive"},
-            {AccessType::AZURE, "AzureBlobStorage"}
-        };
 
         /// Sync SOURCE and TABLE_ENGINE, so only need to check TABLE_ENGINE later.
         if (access_control.doesTableEnginesRequireGrant())
@@ -267,6 +269,11 @@ namespace
 
     template <typename... OtherArgs>
     std::string_view getDatabase(std::string_view arg1, const OtherArgs &...) { return arg1; }
+
+    std::string_view getTableEngine() { return {}; }
+
+    template <typename... OtherArgs>
+    std::string_view getTableEngine(std::string_view arg1, const OtherArgs &...) { return arg1; }
 }
 
 
@@ -620,18 +627,58 @@ bool ContextAccess::checkAccessImplHelper(const ContextPtr & context, AccessFlag
 
     if (!granted)
     {
-        if (grant_option && acs->isGranted(flags, args...))
+        auto access_denied_no_grant = [&]<typename... FmtArgs>(AccessFlags access_flags, FmtArgs && ...fmt_args)
         {
+            if (grant_option && acs->isGranted(access_flags, fmt_args...))
+            {
+                return access_denied(ErrorCodes::ACCESS_DENIED,
+                    "{}: Not enough privileges. "
+                    "The required privileges have been granted, but without grant option. "
+                    "To execute this query, it's necessary to have the grant {} WITH GRANT OPTION",
+                    AccessRightsElement{access_flags, fmt_args...}.toStringWithoutOptions());
+            }
+
             return access_denied(ErrorCodes::ACCESS_DENIED,
-                "{}: Not enough privileges. "
-                "The required privileges have been granted, but without grant option. "
-                "To execute this query, it's necessary to have the grant {} WITH GRANT OPTION",
-                AccessRightsElement{flags, args...}.toStringWithoutOptions());
+                "{}: Not enough privileges. To execute this query, it's necessary to have the grant {}",
+                AccessRightsElement{access_flags, fmt_args...}.toStringWithoutOptions() + (grant_option ? " WITH GRANT OPTION" : ""));
+        };
+
+        /// As we check the SOURCES from the Table Engine logic, direct prompt about Table Engine would be misleading
+        /// since SOURCES is not granted actually. In order to solve this, turn the prompt logic back to Sources.
+        if (flags & AccessType::TABLE_ENGINE && !access_control->doesTableEnginesRequireGrant())
+        {
+            AccessFlags new_flags;
+
+            String table_engine_name{getTableEngine(args...)};
+            for (const auto & source_and_table_engine : source_and_table_engines)
+            {
+                const auto & table_engine = std::get<1>(source_and_table_engine);
+                if (table_engine != table_engine_name) continue;
+                const auto & source = std::get<0>(source_and_table_engine);
+                /// Set the flags from Table Engine to SOURCES so that prompts can be meaningful.
+                new_flags = source;
+                break;
+            }
+
+            /// Might happen in the case of grant Table Engine on A (but not source), then revoke A.
+            if (new_flags.isEmpty())
+                return access_denied_no_grant(flags, args...);
+
+            if (grant_option && acs->isGranted(flags, args...))
+            {
+                return access_denied(ErrorCodes::ACCESS_DENIED,
+                    "{}: Not enough privileges. "
+                    "The required privileges have been granted, but without grant option. "
+                    "To execute this query, it's necessary to have the grant {} WITH GRANT OPTION",
+                    AccessRightsElement{new_flags}.toStringForAccessTypeSource());
+            }
+
+            return access_denied(ErrorCodes::ACCESS_DENIED,
+                "{}: Not enough privileges. To execute this query, it's necessary to have the grant {}",
+                AccessRightsElement{new_flags}.toStringForAccessTypeSource() + (grant_option ? " WITH GRANT OPTION" : ""));
         }
 
-        return access_denied(ErrorCodes::ACCESS_DENIED,
-            "{}: Not enough privileges. To execute this query, it's necessary to have the grant {}",
-            AccessRightsElement{flags, args...}.toStringWithoutOptions() + (grant_option ? " WITH GRANT OPTION" : ""));
+        return access_denied_no_grant(flags, args...);
     }
 
 struct PrecalculatedFlags
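A hedged SQL sketch of the scenario the comments above describe (the user name is hypothetical; the message text comes from the diff):

```sql
-- Granting the URL source also covers the URL table engine when
-- table engines do not require their own grant.
GRANT URL ON *.* TO alice;
REVOKE URL ON *.* FROM alice;

-- A subsequent CREATE TABLE ... ENGINE = URL(...) by alice is now rejected
-- with a source-level prompt such as:
--   "Not enough privileges. To execute this query, it's necessary to have the grant URL ON *.*"
```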
|
@ -1,12 +1,12 @@
-#include <cassert>
-#include <memory>
 
 #include <IO/WriteHelpers.h>
 #include <IO/ReadHelpers.h>
 #include <IO/ReadHelpersArena.h>
 
 #include <DataTypes/DataTypeArray.h>
-#include <DataTypes/DataTypesNumber.h>
+#include <DataTypes/DataTypeDate.h>
+#include <DataTypes/DataTypeDate32.h>
+#include <DataTypes/DataTypeDateTime.h>
+#include <DataTypes/DataTypeDateTime64.h>
 #include <DataTypes/DataTypeString.h>
 
 #include <Columns/ColumnArray.h>
@ -15,18 +15,14 @@
 #include <Common/HashTable/HashTableKeyHolder.h>
 #include <Common/assert_cast.h>
 
-#include <AggregateFunctions/IAggregateFunction.h>
-#include <AggregateFunctions/KeyHolderHelpers.h>
 
 #include <Core/Field.h>
 
 #include <AggregateFunctions/AggregateFunctionFactory.h>
-#include <AggregateFunctions/Helpers.h>
 #include <AggregateFunctions/FactoryHelpers.h>
-#include <DataTypes/DataTypeDate.h>
-#include <DataTypes/DataTypeDate32.h>
-#include <DataTypes/DataTypeDateTime.h>
-#include <DataTypes/DataTypeDateTime64.h>
+#include <AggregateFunctions/Helpers.h>
+#include <AggregateFunctions/IAggregateFunction.h>
+#include <memory>
 
 
 namespace DB
@ -51,7 +47,7 @@ struct AggregateFunctionGroupArrayIntersectData
 };
 
 
-/// Puts all values to the hash set. Returns an array of unique values. Implemented for numeric types.
+/// Puts all values to the hash set. Returns an array of unique values present in all inputs. Implemented for numeric types.
 template <typename T>
 class AggregateFunctionGroupArrayIntersect
     : public IAggregateFunctionDataHelper<AggregateFunctionGroupArrayIntersectData<T>, AggregateFunctionGroupArrayIntersect<T>>
@ -69,7 +65,7 @@ public:
     : IAggregateFunctionDataHelper<AggregateFunctionGroupArrayIntersectData<T>,
         AggregateFunctionGroupArrayIntersect<T>>({argument_type}, parameters_, result_type_) {}
 
-    String getName() const override { return "GroupArrayIntersect"; }
+    String getName() const override { return "groupArrayIntersect"; }
 
     bool allocatesMemoryInArena() const override { return false; }
 
@ -158,7 +154,7 @@ public:
         set.reserve(size);
         for (size_t i = 0; i < size; ++i)
         {
-            int key;
+            T key;
             readIntBinary(key, buf);
            set.insert(key);
         }
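The `int key` line was the actual bug here: `readIntBinary` deduces the byte width from its argument's static type, so states serialized with a wider key type (for example `UInt64`) were read back truncated. A reduced sketch of the failure mode, with plain byte-vector I/O standing in for ClickHouse's buffer machinery (little-endian assumed, as in the real helpers):

    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    // Minimal stand-ins for writeIntBinary/readIntBinary: the width follows the
    // static type of the argument, exactly like the real helpers.
    template <typename T>
    void writeIntBinary(T value, std::vector<char> & buf)
    {
        const char * p = reinterpret_cast<const char *>(&value);
        buf.insert(buf.end(), p, p + sizeof(T));
    }

    template <typename T>
    void readIntBinary(T & value, const std::vector<char> & buf, size_t & pos)
    {
        std::memcpy(&value, buf.data() + pos, sizeof(T));
        pos += sizeof(T);
    }

    int main()
    {
        std::vector<char> buf;
        writeIntBinary<uint64_t>(5'000'000'000, buf); // serialized as 8 bytes

        size_t pos = 0;
        int wrong;                      // the old code: always reads sizeof(int) bytes
        readIntBinary(wrong, buf, pos);

        pos = 0;
        uint64_t right;                 // the fix: the key has the template type T
        readIntBinary(right, buf, pos);

        std::cout << wrong << " vs " << right << '\n'; // 705032704 vs 5000000000
    }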
@ -213,7 +209,7 @@ public:
         : IAggregateFunctionDataHelper<AggregateFunctionGroupArrayIntersectGenericData, AggregateFunctionGroupArrayIntersectGeneric<is_plain_column>>({input_data_type_}, parameters_, result_type_)
         , input_data_type(result_type_) {}
 
-    String getName() const override { return "GroupArrayIntersect"; }
+    String getName() const override { return "groupArrayIntersect"; }
 
     bool allocatesMemoryInArena() const override { return true; }
 
@ -240,7 +236,7 @@ public:
             {
                 const char * begin = nullptr;
                 StringRef serialized = data_column->serializeValueIntoArena(offset + i, *arena, begin);
-                assert(serialized.data != nullptr);
+                chassert(serialized.data != nullptr);
                 set.emplace(SerializedKeyHolder{serialized, *arena}, it, inserted);
             }
         }
@ -260,7 +256,7 @@ public:
             {
                 const char * begin = nullptr;
                 StringRef serialized = data_column->serializeValueIntoArena(offset + i, *arena, begin);
-                assert(serialized.data != nullptr);
+                chassert(serialized.data != nullptr);
                 it = set.find(serialized);
 
                 if (it != nullptr)
@ -195,7 +195,7 @@ bool SingleValueDataFixed<T>::isEqualTo(const IColumn & column, size_t index) co
 template <typename T>
 bool SingleValueDataFixed<T>::isEqualTo(const SingleValueDataFixed<T> & to) const
 {
-    return has() && to.value == value;
+    return has() && to.has() && to.value == value;
 }
 
 template <typename T>
@ -904,6 +904,7 @@ bool SingleValueDataNumeric<T>::isEqualTo(const DB::IColumn & column, size_t ind
 template <typename T>
 bool SingleValueDataNumeric<T>::isEqualTo(const DB::SingleValueDataBase & to) const
 {
+    /// to.has() is checked in memory.get().isEqualTo
     auto const & other = assert_cast<const Self &>(to);
     return memory.get().isEqualTo(other.memory.get());
 }
@ -917,6 +918,7 @@ void SingleValueDataNumeric<T>::set(const DB::IColumn & column, size_t row_num,
 template <typename T>
 void SingleValueDataNumeric<T>::set(const DB::SingleValueDataBase & to, DB::Arena * arena)
 {
+    /// to.has() is checked in memory.get().set
     auto const & other = assert_cast<const Self &>(to);
     return memory.get().set(other.memory.get(), arena);
 }
@ -924,6 +926,7 @@ void SingleValueDataNumeric<T>::set(const DB::SingleValueDataBase & to, DB::Aren
 template <typename T>
 bool SingleValueDataNumeric<T>::setIfSmaller(const DB::SingleValueDataBase & to, DB::Arena * arena)
 {
+    /// to.has() is checked in memory.get().setIfSmaller
     auto const & other = assert_cast<const Self &>(to);
     return memory.get().setIfSmaller(other.memory.get(), arena);
 }
@ -931,6 +934,7 @@ bool SingleValueDataNumeric<T>::setIfSmaller(const DB::SingleValueDataBase & to,
 template <typename T>
 bool SingleValueDataNumeric<T>::setIfGreater(const DB::SingleValueDataBase & to, DB::Arena * arena)
 {
+    /// to.has() is checked in memory.get().setIfGreater
     auto const & other = assert_cast<const Self &>(to);
     return memory.get().setIfGreater(other.memory.get(), arena);
 }
@ -1191,7 +1195,7 @@ bool SingleValueDataString::isEqualTo(const DB::IColumn & column, size_t row_num
 bool SingleValueDataString::isEqualTo(const SingleValueDataBase & other) const
 {
     auto const & to = assert_cast<const Self &>(other);
-    return has() && to.getStringRef() == getStringRef();
+    return has() && to.has() && to.getStringRef() == getStringRef();
 }
 
 void SingleValueDataString::set(const IColumn & column, size_t row_num, Arena * arena)
@ -1291,7 +1295,7 @@ bool SingleValueDataGeneric::isEqualTo(const IColumn & column, size_t row_num) c
 bool SingleValueDataGeneric::isEqualTo(const DB::SingleValueDataBase & other) const
 {
     auto const & to = assert_cast<const Self &>(other);
-    return has() && to.value == value;
+    return has() && to.has() && to.value == value;
 }
 
 void SingleValueDataGeneric::set(const IColumn & column, size_t row_num, Arena *)
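The pattern in the SingleValueData hunks is the same throughout: `isEqualTo` compared the stored values without first checking that the other state actually holds one, so an initialized state could compare equal to an uninitialized state whose zeroed storage happened to match. A reduced sketch of the bug with a toy `State` type, not the real `SingleValueDataFixed`:

    #include <iostream>

    struct State
    {
        bool has_value = false;
        int value = 0; // storage is zeroed even when "empty"

        // Old behaviour: only checks *this; an empty rhs with zeroed storage
        // spuriously compares equal to an initialized lhs holding 0.
        bool isEqualToOld(const State & to) const { return has_value && to.value == value; }

        // Fixed behaviour: both sides must actually hold a value.
        bool isEqualTo(const State & to) const { return has_value && to.has_value && to.value == value; }
    };

    int main()
    {
        State initialized{true, 0};
        State empty; // never set

        std::cout << initialized.isEqualToOld(empty) << '\n'; // 1 -- spurious match
        std::cout << initialized.isEqualTo(empty) << '\n';    // 0
    }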
@ -67,6 +67,9 @@ struct UniqVariadicHash<false, true>
 {
     static UInt64 apply(size_t num_args, const IColumn ** columns, size_t row_num)
     {
+        if (!num_args)
+            return 0;
+
         UInt64 hash;
 
         const auto & tuple_columns = assert_cast<const ColumnTuple *>(columns[0])->getColumns();
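Without the guard, `columns[0]` is dereferenced even when zero columns reach this specialization, which is undefined behaviour. A minimal sketch of the same defensive pattern, using plain FNV-1a over `int` columns instead of the real SipHash/ColumnTuple machinery:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hash one row across a variadic list of columns; an empty argument list
    // hashes to a fixed value instead of touching columns[0].
    uint64_t hashRow(size_t num_args, const std::vector<int> * const * columns, size_t row_num)
    {
        if (!num_args)
            return 0;

        uint64_t hash = 1469598103934665603ULL; // FNV offset basis
        for (size_t i = 0; i < num_args; ++i)
        {
            hash ^= static_cast<uint64_t>((*columns[i])[row_num]);
            hash *= 1099511628211ULL; // FNV prime
        }
        return hash;
    }

    int main()
    {
        std::vector<int> a{1, 2}, b{3, 4};
        const std::vector<int> * cols[] = {&a, &b};
        std::cout << hashRow(2, cols, 0) << '\n';
        std::cout << hashRow(0, nullptr, 0) << '\n'; // safe: 0
    }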
@ -1,2 +1,2 @@
 clickhouse_add_executable(aggregate_function_state_deserialization_fuzzer aggregate_function_state_deserialization_fuzzer.cpp ${SRCS})
-target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions)
+target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions)

@ -12,38 +12,36 @@
 
 #include <Interpreters/Context.h>
 
+#include <AggregateFunctions/IAggregateFunction.h>
 #include <AggregateFunctions/registerAggregateFunctions.h>
 
 #include <base/scope_guard.h>
 
+using namespace DB;
+
+
+ContextMutablePtr context;
+
+extern "C" int LLVMFuzzerInitialize(int *, char ***)
+{
+    if (context)
+        return true;
+
+    SharedContextHolder shared_context = Context::createShared();
+    context = Context::createGlobal(shared_context.get());
+    context->makeGlobalContext();
+
+    MainThreadStatus::getInstance();
+
+    registerAggregateFunctions();
+
+    return 0;
+}
+
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
 {
     try
     {
-        using namespace DB;
-
-        static SharedContextHolder shared_context;
-        static ContextMutablePtr context;
-
-        auto initialize = [&]() mutable
-        {
-            if (context)
-                return true;
-
-            shared_context = Context::createShared();
-            context = Context::createGlobal(shared_context.get());
-            context->makeGlobalContext();
-            context->setApplicationType(Context::ApplicationType::LOCAL);
-
-            MainThreadStatus::getInstance();
-
-            registerAggregateFunctions();
-            return true;
-        };
-
-        static bool initialized = initialize();
-        (void) initialized;
-
         total_memory_tracker.resetCounters();
         total_memory_tracker.setHardLimit(1_GiB);
         CurrentThread::get().memory_tracker.resetCounters();
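This refactoring moves the one-time setup out of the hot `LLVMFuzzerTestOneInput` path into libFuzzer's dedicated `LLVMFuzzerInitialize` hook, which the driver calls exactly once before the first input. A bare-bones harness showing the same shape (the `GlobalState` type is a placeholder for whatever the target needs; build with clang++ -fsanitize=fuzzer):

    #include <cstddef>
    #include <cstdint>

    // Placeholder for expensive, process-wide state (contexts, registries, ...).
    struct GlobalState { int ready = 1; };
    static GlobalState * state = nullptr;

    // Called once by the libFuzzer driver before any input is processed.
    extern "C" int LLVMFuzzerInitialize(int *, char ***)
    {
        if (!state)
            state = new GlobalState();
        return 0;
    }

    // Called for every generated input; must not redo global setup.
    extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
    {
        if (size == 0)
            return 0;
        // ... feed data/size to the code under test using `state` ...
        (void) data;
        return 0;
    }

With the driver in charge of initialization, crashes during setup are reported once rather than on every input, and the per-input path stays minimal.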
@ -24,7 +24,7 @@ void InterpolateNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_st
 {
     buffer << std::string(indent, ' ') << "INTERPOLATE id: " << format_state.getNodeId(this);
 
-    buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION\n";
+    buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION " << expression_name << " \n";
     getExpression()->dumpTreeImpl(buffer, format_state, indent + 4);
 
     buffer << '\n' << std::string(indent + 2, ' ') << "INTERPOLATE_EXPRESSION\n";

@ -50,6 +50,8 @@ public:
         return QueryTreeNodeType::INTERPOLATE;
     }
 
+    const std::string & getExpressionName() const { return expression_name; }
+
     void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override;
 
 protected:
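Together with the analyzer hunk further down, the point of `expression_name` is to remember the column's user-visible name at tree-build time, so later resolution no longer has to require the INTERPOLATE expression to still be a bare identifier. A toy version of the design choice:

    #include <iostream>
    #include <string>
    #include <utility>

    // The node captures the name of its expression when it is built, so the
    // name survives even if the expression node itself is later rewritten.
    class InterpolateNode
    {
    public:
        explicit InterpolateNode(std::string expression_name_) : expression_name(std::move(expression_name_)) {}

        const std::string & getExpressionName() const { return expression_name; }

    private:
        std::string expression_name;
    };

    int main()
    {
        InterpolateNode node("x");
        // ... the expression under `node` may later be replaced by a constant ...
        std::cout << node.getExpressionName() << '\n'; // still "x"
    }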
@ -68,10 +68,13 @@ QueryTreeNodePtr findEqualsFunction(const QueryTreeNodes & nodes)
     return nullptr;
 }
 
-/// Checks if the node is combination of isNull and notEquals functions of two the same arguments
+/// Checks if the node is combination of isNull and notEquals functions of two the same arguments:
+/// [ (a <> b AND) ] (a IS NULL) AND (b IS NULL)
 bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs, QueryTreeNodePtr & rhs)
 {
     QueryTreeNodePtrWithHashSet all_arguments;
+    QueryTreeNodePtrWithHashSet is_null_arguments;
 
     for (const auto & node : nodes)
     {
         const auto * func_node = node->as<FunctionNode>();
@ -80,7 +83,11 @@ bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs,
 
         const auto & arguments = func_node->getArguments().getNodes();
         if (func_node->getFunctionName() == "isNull" && arguments.size() == 1)
+        {
             all_arguments.insert(QueryTreeNodePtrWithHash(arguments[0]));
+            is_null_arguments.insert(QueryTreeNodePtrWithHash(arguments[0]));
+        }
 
         else if (func_node->getFunctionName() == "notEquals" && arguments.size() == 2)
         {
             if (arguments[0]->isEqual(*arguments[1]))
@ -95,7 +102,7 @@ bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs,
             return false;
     }
 
-    if (all_arguments.size() != 2)
+    if (all_arguments.size() != 2 || is_null_arguments.size() != 2)
         return false;
 
     lhs = all_arguments.begin()->node;
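The extra `is_null_arguments` set closes a hole: with only `all_arguments`, an input like `(a <> b) AND (a IS NULL) AND (a IS NULL)` also produced two distinct arguments overall, even though only one of them ever appeared under `IS NULL`. A reduced sketch of the corrected check, modelling expressions as plain strings:

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    // Conjuncts are modeled as ("isNull", {arg}) or ("notEquals", {lhs, rhs}).
    struct Conjunct { std::string name; std::vector<std::string> args; };

    bool matchIsNullOfTwoArgs(const std::vector<Conjunct> & nodes)
    {
        std::set<std::string> all_arguments;
        std::set<std::string> is_null_arguments;

        for (const auto & node : nodes)
        {
            if (node.name == "isNull" && node.args.size() == 1)
            {
                all_arguments.insert(node.args[0]);
                is_null_arguments.insert(node.args[0]);
            }
            else if (node.name == "notEquals" && node.args.size() == 2)
            {
                if (node.args[0] == node.args[1])
                    return false;
                all_arguments.insert(node.args[0]);
                all_arguments.insert(node.args[1]);
            }
            else
                return false;
        }

        // Both sides must be distinct AND each must appear under IS NULL.
        return all_arguments.size() == 2 && is_null_arguments.size() == 2;
    }

    int main()
    {
        std::cout << matchIsNullOfTwoArgs({{"isNull", {"a"}}, {"isNull", {"b"}}}) << '\n';                            // 1
        std::cout << matchIsNullOfTwoArgs({{"notEquals", {"a", "b"}}, {"isNull", {"a"}}, {"isNull", {"a"}}}) << '\n'; // 0
    }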
@ -2,6 +2,7 @@
 
 #include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/DataTypeNullable.h>
+#include <DataTypes/DataTypeAggregateFunction.h>
 
 #include <AggregateFunctions/AggregateFunctionFactory.h>
 #include <AggregateFunctions/IAggregateFunction.h>
@ -42,7 +43,7 @@ public:
         if (lower_name.ends_with("if"))
             return;
 
-        auto & function_arguments_nodes = function_node->getArguments().getNodes();
+        const auto & function_arguments_nodes = function_node->getArguments().getNodes();
         if (function_arguments_nodes.size() != 1)
             return;
 
@ -50,6 +51,8 @@ public:
         if (!if_node || if_node->getFunctionName() != "if")
             return;
 
+        FunctionNodePtr replaced_node;
+
         auto if_arguments_nodes = if_node->getArguments().getNodes();
         auto * first_const_node = if_arguments_nodes[1]->as<ConstantNode>();
         auto * second_const_node = if_arguments_nodes[2]->as<ConstantNode>();
@ -75,8 +78,11 @@ public:
                 new_arguments[0] = std::move(if_arguments_nodes[1]);
 
                 new_arguments[1] = std::move(if_arguments_nodes[0]);
-                function_arguments_nodes = std::move(new_arguments);
-                resolveAggregateFunctionNodeByName(*function_node, function_node->getFunctionName() + "If");
+                replaced_node = std::make_shared<FunctionNode>(function_node->getFunctionName() + "If");
+                replaced_node->getArguments().getNodes() = std::move(new_arguments);
+                replaced_node->getParameters().getNodes() = function_node->getParameters().getNodes();
+                resolveAggregateFunctionNodeByName(*replaced_node, replaced_node->getFunctionName());
             }
         }
         else if (first_const_node)
@ -104,10 +110,26 @@ public:
                     FunctionFactory::instance().get("not", getContext())->build(not_function->getArgumentColumns()));
                 new_arguments[1] = std::move(not_function);
 
-                function_arguments_nodes = std::move(new_arguments);
-                resolveAggregateFunctionNodeByName(*function_node, function_node->getFunctionName() + "If");
+                replaced_node = std::make_shared<FunctionNode>(function_node->getFunctionName() + "If");
+                replaced_node->getArguments().getNodes() = std::move(new_arguments);
+                replaced_node->getParameters().getNodes() = function_node->getParameters().getNodes();
+                resolveAggregateFunctionNodeByName(*replaced_node, replaced_node->getFunctionName());
             }
         }
 
+        if (!replaced_node)
+            return;
+
+        auto prev_type = function_node->getResultType();
+        auto curr_type = replaced_node->getResultType();
+        if (!prev_type->equals(*curr_type))
+            return;
+
+        /// Just in case, CAST compatible aggregate function states.
+        if (WhichDataType(prev_type).isAggregateFunction() && !DataTypeAggregateFunction::strictEquals(prev_type, curr_type))
+            node = createCastFunction(std::move(replaced_node), prev_type, getContext());
+        else
+            node = std::move(replaced_node);
     }
 };
 
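The pass used to mutate `function_node` in place; it now builds the `...If` candidate separately and commits it only after checking that the rewrite preserves the result type (with a CAST for loosely-equal aggregate-function states). A reduced, runnable sketch of that commit-at-the-end shape, with toy Node and type stand-ins rather than the real Analyzer API:

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    struct Node
    {
        std::string name;
        std::string result_type;
    };

    using NodePtr = std::shared_ptr<Node>;

    // Toy resolver: result types of the rewritten functions. Purely illustrative.
    static const std::map<std::string, std::string> resolved_types
    {
        {"sumIf", "Int64"},
        {"avgIf", "Float32"}, // deliberately differs from avg's Float64 to show the guard
    };

    void tryRewriteWithIf(NodePtr & node)
    {
        auto it = resolved_types.find(node->name + "If");
        if (it == resolved_types.end())
            return;

        // Build the candidate separately instead of mutating `node` in place.
        auto replaced = std::make_shared<Node>(Node{it->first, it->second});

        // Abandon the rewrite if it would change the expression's result type.
        if (replaced->result_type != node->result_type)
            return;

        node = std::move(replaced); // commit only at the very end
    }

    int main()
    {
        auto sum_node = std::make_shared<Node>(Node{"sum", "Int64"});
        auto avg_node = std::make_shared<Node>(Node{"avg", "Float64"});

        tryRewriteWithIf(sum_node);
        tryRewriteWithIf(avg_node);

        std::cout << sum_node->name << ' ' << avg_node->name << '\n'; // sumIf avg
    }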
@ -237,7 +237,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
     /// Remove global settings limit and offset
     if (const auto & settings_ref = updated_context->getSettingsRef(); settings_ref.limit || settings_ref.offset)
     {
-        Settings settings = updated_context->getSettings();
+        Settings settings = updated_context->getSettingsCopy();
         limit = settings.limit;
         offset = settings.offset;
         settings.limit = 0;
@ -268,6 +268,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
         }
     }
 
+    const auto enable_order_by_all = updated_context->getSettingsRef().enable_order_by_all;
+
     auto current_query_tree = std::make_shared<QueryNode>(std::move(updated_context), std::move(settings_changes));
 
     current_query_tree->setIsSubquery(is_subquery);
@ -281,7 +283,10 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
     current_query_tree->setIsGroupByWithRollup(select_query_typed.group_by_with_rollup);
     current_query_tree->setIsGroupByWithGroupingSets(select_query_typed.group_by_with_grouping_sets);
     current_query_tree->setIsGroupByAll(select_query_typed.group_by_all);
-    current_query_tree->setIsOrderByAll(select_query_typed.order_by_all);
+    /// order_by_all flag in AST is set w/o consideration of `enable_order_by_all` setting
+    /// since SETTINGS section has not been parsed yet, - so, check the setting here
+    if (enable_order_by_all)
+        current_query_tree->setIsOrderByAll(select_query_typed.order_by_all);
     current_query_tree->setOriginalAST(select_query);
 
     auto current_context = current_query_tree->getContext();

@ -64,6 +64,8 @@
 #include <Analyzer/Resolve/TableExpressionsAliasVisitor.h>
 #include <Analyzer/Resolve/ReplaceColumnsVisitor.h>
 
+#include <Planner/PlannerActionsVisitor.h>
+
 #include <Core/Settings.h>
 
 namespace ProfileEvents
@ -503,7 +505,7 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
     ProfileEvents::increment(ProfileEvents::ScalarSubqueriesCacheMiss);
     auto subquery_context = Context::createCopy(context);
 
-    Settings subquery_settings = context->getSettings();
+    Settings subquery_settings = context->getSettingsCopy();
     subquery_settings.max_result_rows = 1;
     subquery_settings.extremes = false;
     subquery_context->setSettings(subquery_settings);
@ -1740,7 +1742,7 @@ QueryAnalyzer::QueryTreeNodesWithNames QueryAnalyzer::resolveQualifiedMatcher(Qu
     const auto * tuple_data_type = typeid_cast<const DataTypeTuple *>(result_type.get());
     if (!tuple_data_type)
         throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
-            "Qualified matcher {} find non compound expression {} with type {}. Expected tuple or array of tuples. In scope {}",
+            "Qualified matcher {} found a non-compound expression {} with type {}. Expected a tuple or an array of tuples. In scope {}",
             matcher_node->formatASTForErrorMessage(),
             expression_query_tree_node->formatASTForErrorMessage(),
             expression_query_tree_node->getResultType()->getName(),
@ -4122,11 +4124,7 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo
|
|||||||
{
|
{
|
||||||
auto & interpolate_node_typed = interpolate_node->as<InterpolateNode &>();
|
auto & interpolate_node_typed = interpolate_node->as<InterpolateNode &>();
|
||||||
|
|
||||||
auto * column_to_interpolate = interpolate_node_typed.getExpression()->as<IdentifierNode>();
|
auto column_to_interpolate_name = interpolate_node_typed.getExpressionName();
|
||||||
if (!column_to_interpolate)
|
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for indentifiers, but {} is found",
|
|
||||||
interpolate_node_typed.getExpression()->formatASTForErrorMessage());
|
|
||||||
auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName();
|
|
||||||
|
|
||||||
resolveExpressionNode(interpolate_node_typed.getExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);
|
resolveExpressionNode(interpolate_node_typed.getExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);
|
||||||
|
|
||||||
@ -4135,14 +4133,11 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo
|
|||||||
auto & interpolation_to_resolve = interpolate_node_typed.getInterpolateExpression();
|
auto & interpolation_to_resolve = interpolate_node_typed.getInterpolateExpression();
|
||||||
IdentifierResolveScope interpolate_scope(interpolation_to_resolve, &scope /*parent_scope*/);
|
IdentifierResolveScope interpolate_scope(interpolation_to_resolve, &scope /*parent_scope*/);
|
||||||
|
|
||||||
auto fake_column_node = std::make_shared<ColumnNode>(NameAndTypePair(column_to_interpolate_name, interpolate_node_typed.getExpression()->getResultType()), interpolate_node_typed.getExpression());
|
auto fake_column_node = std::make_shared<ColumnNode>(NameAndTypePair(column_to_interpolate_name, interpolate_node_typed.getExpression()->getResultType()), interpolate_node);
|
||||||
if (is_column_constant)
|
if (is_column_constant)
|
||||||
interpolate_scope.expression_argument_name_to_node.emplace(column_to_interpolate_name, fake_column_node);
|
interpolate_scope.expression_argument_name_to_node.emplace(column_to_interpolate_name, fake_column_node);
|
||||||
|
|
||||||
resolveExpressionNode(interpolation_to_resolve, interpolate_scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);
|
resolveExpressionNode(interpolation_to_resolve, interpolate_scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);
|
||||||
|
|
||||||
if (is_column_constant)
|
|
||||||
interpolation_to_resolve = interpolation_to_resolve->cloneAndReplace(fake_column_node, interpolate_node_typed.getExpression());
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -43,6 +43,12 @@ size_t getCompoundTypeDepth(const IDataType & type)
             const auto & tuple_elements = assert_cast<const DataTypeTuple &>(*current_type).getElements();
             if (!tuple_elements.empty())
                 current_type = tuple_elements.at(0).get();
+            else
+            {
+                /// Special case: tuple with no element - tuple(). In this case, what's the compound type depth?
+                /// I'm not certain about the theoretical answer, but from experiment, 1 is the most reasonable choice.
+                return 1;
+            }
 
             ++result;
         }
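For intuition: the depth being computed counts compound layers around a scalar, so `UInt8` is 0, `Tuple(UInt8)` is 1, `Array(Tuple(UInt8))` is 2. The new branch pins the element-less `tuple()` to 1 rather than descending into a first element that does not exist. A toy model of the walk (not the real IDataType interface):

    #include <iostream>
    #include <memory>
    #include <vector>

    // Toy type model: a compound type descends into its first element.
    struct Type
    {
        bool is_compound = false;
        std::vector<std::shared_ptr<Type>> elements;
    };

    size_t getCompoundTypeDepth(const Type & type)
    {
        size_t result = 0;
        const Type * current = &type;
        while (current->is_compound)
        {
            if (current->elements.empty())
                return 1; // the tuple() special case from the hunk above
            current = current->elements.front().get();
            ++result;
        }
        return result;
    }

    int main()
    {
        auto scalar = std::make_shared<Type>();
        Type tuple_of_scalar{true, {scalar}};
        Type empty_tuple{true, {}};

        std::cout << getCompoundTypeDepth(*scalar) << '\n';         // 0
        std::cout << getCompoundTypeDepth(tuple_of_scalar) << '\n'; // 1
        std::cout << getCompoundTypeDepth(empty_tuple) << '\n';     // 1
    }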
@ -867,7 +867,7 @@ void updateContextForSubqueryExecution(ContextMutablePtr & mutable_context)
      * max_rows_in_join, max_bytes_in_join, join_overflow_mode,
      * which are checked separately (in the Set, Join objects).
      */
-    Settings subquery_settings = mutable_context->getSettings();
+    Settings subquery_settings = mutable_context->getSettingsCopy();
     subquery_settings.max_result_rows = 0;
     subquery_settings.max_result_bytes = 0;
     /// The calculation of extremes does not make sense and is not necessary (if you do it, then the extremes of the subquery can be taken for whole query).
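`getSettings()` is renamed to `getSettingsCopy()` across these call sites; the behaviour (returning a copy that can be mutated and applied to a child context) is unchanged, the new name just makes the copy explicit. The idiom itself in miniature, with hypothetical Context/Settings shapes:

    #include <iostream>

    struct Settings
    {
        unsigned max_result_rows = 0; // 0 means unlimited
    };

    class Context
    {
    public:
        const Settings & getSettingsRef() const { return settings; } // cheap, read-only
        Settings getSettingsCopy() const { return settings; }        // explicit copy for mutation
        void setSettings(const Settings & s) { settings = s; }

    private:
        Settings settings;
    };

    int main()
    {
        Context parent, subquery;

        Settings subquery_settings = parent.getSettingsCopy(); // copy, then tighten limits
        subquery_settings.max_result_rows = 1;
        subquery.setSettings(subquery_settings);

        std::cout << parent.getSettingsRef().max_result_rows << ' '
                  << subquery.getSettingsRef().max_result_rows << '\n'; // 0 1
    }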
@ -384,6 +384,7 @@ BackupsWorker::BackupsWorker(ContextMutablePtr global_context, size_t num_backup
     , allow_concurrent_backups(global_context->getConfigRef().getBool("backups.allow_concurrent_backups", true))
     , allow_concurrent_restores(global_context->getConfigRef().getBool("backups.allow_concurrent_restores", true))
     , remove_backup_files_after_failure(global_context->getConfigRef().getBool("backups.remove_backup_files_after_failure", true))
+    , test_randomize_order(global_context->getConfigRef().getBool("backups.test_randomize_order", false))
     , test_inject_sleep(global_context->getConfigRef().getBool("backups.test_inject_sleep", false))
     , log(getLogger("BackupsWorker"))
     , backup_log(global_context->getBackupLog())
@ -713,14 +714,25 @@ void BackupsWorker::writeBackupEntries(
     bool always_single_threaded = !backup->supportsWritingInMultipleThreads();
     auto & thread_pool = getThreadPool(ThreadPoolId::BACKUP_COPY_FILES);
 
+    std::vector<size_t> writing_order;
+    if (test_randomize_order)
+    {
+        /// Randomize the order in which we write backup entries to the backup.
+        writing_order.resize(backup_entries.size());
+        std::iota(writing_order.begin(), writing_order.end(), 0);
+        std::shuffle(writing_order.begin(), writing_order.end(), thread_local_rng);
+    }
+
     ThreadPoolCallbackRunnerLocal<void> runner(thread_pool, "BackupWorker");
     for (size_t i = 0; i != backup_entries.size(); ++i)
     {
         if (failed)
             break;
 
-        auto & entry = backup_entries[i].second;
-        const auto & file_info = file_infos[i];
+        size_t index = !writing_order.empty() ? writing_order[i] : i;
+
+        auto & entry = backup_entries[index].second;
+        const auto & file_info = file_infos[index];
 
         auto job = [&]()
         {
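The `test_randomize_order` hook permutes only the order in which entries are visited while leaving the entry-to-file pairing intact, which is why `entry` and `file_info` are both looked up through the same `index`. The shuffle itself is the standard iota-then-shuffle idiom:

    #include <algorithm>
    #include <iostream>
    #include <numeric>
    #include <random>
    #include <string>
    #include <vector>

    int main()
    {
        const std::vector<std::string> backup_entries{"a.bin", "b.bin", "c.bin"};

        // Build the identity permutation 0..n-1, then shuffle it.
        std::vector<size_t> writing_order(backup_entries.size());
        std::iota(writing_order.begin(), writing_order.end(), 0);
        std::mt19937 rng{std::random_device{}()};
        std::shuffle(writing_order.begin(), writing_order.end(), rng);

        // Each iteration still addresses one concrete entry; only the visit order changes.
        for (size_t i = 0; i != backup_entries.size(); ++i)
        {
            size_t index = writing_order[i];
            std::cout << backup_entries[index] << '\n';
        }
    }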
@ -119,6 +119,7 @@ private:
     const bool allow_concurrent_backups;
     const bool allow_concurrent_restores;
     const bool remove_backup_files_after_failure;
+    const bool test_randomize_order;
     const bool test_inject_sleep;
 
     LoggerPtr log;
Some files were not shown because too many files have changed in this diff