Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-25 09:02:00 +00:00

Commit 94dec67a46: Merge remote-tracking branch 'origin/master' into FixBloomFilterCast
21  .github/actions/check_workflow/action.yml  (vendored, new file)
@@ -0,0 +1,21 @@
name: CheckWorkflowResults

description: Check overall workflow status and post error to slack if any

inputs:
  needs:
    description: github needs context as a json string
    required: true
    type: string

runs:
  using: "composite"
  steps:
    - name: Check Workflow
      shell: bash
      run: |
        export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
        cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
        ${{ inputs.needs }}
        EOF
        python3 ./tests/ci/ci_buddy.py --check-wf-status
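The new action feeds the rendered `needs` JSON to ci_buddy.py through a quoted heredoc. A minimal sketch of that pattern outside Actions, with a hypothetical payload standing in for `${{ inputs.needs }}`: quoting the delimiter ('EOF') turns off shell expansion, so dollar signs, backticks, and quotes inside the JSON reach the file verbatim.

    # Hypothetical payload in place of the GitHub-rendered needs context:
    export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
    cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
    {"Builds_1": {"result": "success"}, "Tests_1": {"result": "failure"}}
    EOF
    # Consume it, e.g. list each upstream job's result:
    jq -r 'to_entries[] | "\(.key): \(.value.result)"' "$WORKFLOW_RESULT_FILE"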
168  .github/actions/release/action.yml  (vendored, deleted)
@@ -1,168 +0,0 @@
name: Release

description: Makes patch releases and creates new release branch

inputs:
  ref:
    description: 'Git reference (branch or commit sha) from which to create the release'
    required: true
    type: string
  type:
    description: 'The type of release: "new" for a new release or "patch" for a patch release'
    required: true
    type: choice
    options:
      - patch
      - new
  dry-run:
    description: 'Dry run'
    required: false
    default: true
    type: boolean
  token:
    required: true
    type: string

runs:
  using: "composite"
  steps:
    - name: Prepare Release Info
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --prepare-release-info \
          --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
          ${{ inputs.dry-run && '--dry-run' || '' }}
        echo "::group::Release Info"
        python3 -m json.tool /tmp/release_info.json
        echo "::endgroup::"
        release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
        commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
        echo "Release Tag: $release_tag"
        echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
        echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
    - name: Download All Release Artifacts
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push Git Tag for the Release
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push New Release Branch
      if: ${{ inputs.type == 'new' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump CH Version and Update Contributors' List
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump Docker versions, Changelog, Security
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        git checkout master
        python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
        echo "List versions"
        ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
        echo "Update docker version"
        ./utils/list-versions/update-docker-version.sh
        echo "Generate ChangeLog"
        export CI=1
        docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
          --volume=".:/ClickHouse" clickhouse/style-test \
          /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
          --gh-user-or-token=${{ inputs.token }} --jobs=5 \
          --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
        git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
        echo "Generate Security"
        python3 ./utils/security-generator/generate_security.py > SECURITY.md
        git diff HEAD
    - name: Create ChangeLog PR
      if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
      uses: peter-evans/create-pull-request@v6
      with:
        author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        token: ${{ inputs.token }}
        committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
        branch: auto/${{ env.RELEASE_TAG }}
        assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
        delete-branch: true
        title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
        labels: do not test
        body: |
          Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
          ### Changelog category (leave one):
          - Not for changelog (changelog entry is not required)
    - name: Complete previous steps and Restore git state
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-completed
        git reset --hard HEAD
        git checkout "$GITHUB_REF_NAME"
    - name: Create GH Release
      shell: bash
      if: ${{ inputs.type == 'patch' }}
      run: |
        python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Docker clickhouse/clickhouse-server building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        cd "./tests/ci"
        python3 ./create_release.py --set-progress-started --progress "docker server release"
        export CHECK_NAME="Docker server image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./create_release.py --set-progress-completed
    - name: Docker clickhouse/clickhouse-keeper building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        cd "./tests/ci"
        python3 ./create_release.py --set-progress-started --progress "docker keeper release"
        export CHECK_NAME="Docker keeper image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./create_release.py --set-progress-completed
    - name: Set current Release progress to Completed with OK
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Post Slack Message
      if: ${{ !cancelled() }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }}
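Throughout this file, `${{ cond && 'a' || 'b' }}` serves as the GitHub Actions expression-language stand-in for a ternary: the expression is evaluated before the shell runs, so the script receives either the flag or nothing. A rough bash equivalent of the rendered result, with a hypothetical variable in place of the expression:

    # Hypothetical stand-in for ${{ inputs.dry-run && '--dry-run' || '' }}:
    dry_run=true
    extra_flag=""
    [ "$dry_run" = "true" ] && extra_flag="--dry-run"
    # $extra_flag is deliberately unquoted so an empty value disappears entirely:
    python3 ./tests/ci/create_release.py --push-release-tag $extra_flag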
111  .github/workflows/auto_release.yml  (vendored, deleted)
@@ -1,111 +0,0 @@
name: AutoRelease

env:
  PYTHONUNBUFFERED: 1
  DRY_RUN: true

concurrency:
  group: release
on:  # yamllint disable-line rule:truthy
  # Workflow uses a test bucket for packages and dry run mode (no real releases)
  schedule:
    - cron: '0 9 * * *'
    - cron: '0 15 * * *'
  workflow_dispatch:
    inputs:
      dry-run:
        description: 'Dry run'
        required: false
        default: true
        type: boolean

jobs:
  AutoRelease:
    runs-on: [self-hosted, release-maker]
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          EOF
      - name: Set DRY_RUN for schedule
        if: ${{ github.event_name == 'schedule' }}
        run: echo "DRY_RUN=true" >> "$GITHUB_ENV"
      - name: Set DRY_RUN for dispatch
        if: ${{ github.event_name == 'workflow_dispatch' }}
        run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV"
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Auto Release Prepare
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --prepare
          echo "::group::Auto Release Info"
          python3 -m json.tool /tmp/autorelease_info.json
          echo "::endgroup::"
          {
            echo 'AUTO_RELEASE_PARAMS<<EOF'
            cat /tmp/autorelease_info.json
            echo 'EOF'
          } >> "$GITHUB_ENV"
      - name: Post Release Branch statuses
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --post-status
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Post Slack Message
        if: ${{ !cancelled() }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
      - name: Clean up
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
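Both the SSH-key step and Auto Release Prepare use the multiline-value form for $GITHUB_ENV: `NAME<<DELIMITER` opens a value that runs until a line equal to the delimiter, which is how a whole JSON document ends up in one env variable for later steps. A minimal sketch with hypothetical names:

    # Hypothetical: export a multiline JSON blob to subsequent workflow steps.
    # GitHub parses $GITHUB_ENV line by line; 'MY_PARAMS<<EOF' starts a value
    # terminated by the first line that is exactly EOF.
    {
      echo 'MY_PARAMS<<EOF'
      cat /tmp/params.json
      echo 'EOF'
    } >> "$GITHUB_ENV"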
7  .github/workflows/backport_branches.yml  (vendored)
@@ -260,13 +260,18 @@ jobs:
       - name: Finish label
         if: ${{ !failure() }}
         run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
           cd "$GITHUB_WORKSPACE/tests/ci"
-          # update mergeable check
-          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+          python3 merge_pr.py --set-ci-status
-          # update overall ci report
-          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
-          python3 merge_pr.py
       - name: Check Workflow results
         if: ${{ !cancelled() }}
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
           cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
172  .github/workflows/create_release.yml  (vendored)
@@ -16,10 +16,15 @@ concurrency:
         options:
           - patch
           - new
+      only-repo:
+        description: 'Run only repos updates including docker (repo-recovery, tests)'
+        required: false
+        default: false
+        type: boolean
       dry-run:
         description: 'Dry run'
         required: false
-        default: true
+        default: false
         type: boolean

 jobs:

@@ -35,10 +40,163 @@ jobs:
         with:
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
-      - name: Call Release Action
-        uses: ./.github/actions/release
+      - name: Prepare Release Info
+        shell: bash
+        run: |
+          if [ ${{ inputs.only-repo }} == "true" ]; then
+            git tag -l ${{ inputs.ref }} || { echo "With only-repo option ref must be a valid release tag"; exit 1; }
+          fi
+          python3 ./tests/ci/create_release.py --prepare-release-info \
+            --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
+            ${{ inputs.dry-run == true && '--dry-run' || '' }} \
+            ${{ inputs.only-repo == true && '--skip-tag-check' || '' }}
+          echo "::group::Release Info"
+          python3 -m json.tool /tmp/release_info.json
+          echo "::endgroup::"
+          release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
+          commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
+          is_latest=$(jq -r '.latest' /tmp/release_info.json)
+          echo "Release Tag: $release_tag"
+          echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
+          echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
+          if [ "$is_latest" == "true" ]; then
+            echo "DOCKER_TAG_TYPE=release-latest" >> "$GITHUB_ENV"
+          else
+            echo "DOCKER_TAG_TYPE=release" >> "$GITHUB_ENV"
+          fi
+      - name: Download All Release Artifacts
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Push Git Tag for the Release
+        if: ${{ ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Push New Release Branch
+        if: ${{ inputs.type == 'new' && ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Bump CH Version and Update Contributors' List
+        if: ${{ ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Bump Docker versions, Changelog, Security
+        if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
+
+          git checkout master  # in case WF started from feature branch
+          echo "List versions"
+          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
+          echo "Update docker version"
+          ./utils/list-versions/update-docker-version.sh
+          echo "Generate ChangeLog"
+          export CI=1
+          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
+            --volume=".:/wd" --workdir="/wd" \
+            clickhouse/style-test \
+            ./tests/ci/changelog.py -v --debug-helpers \
+            --jobs=5 \
+            --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
+          git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
+          echo "Generate Security"
+          python3 ./utils/security-generator/generate_security.py > SECURITY.md
+          git diff HEAD
+      - name: Create ChangeLog PR
+        if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo }}
+        uses: peter-evans/create-pull-request@v6
         with:
-          ref: ${{ inputs.ref }}
-          type: ${{ inputs.type }}
-          dry-run: ${{ inputs.dry-run }}
-          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+          token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
+          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+          commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
+          branch: auto/${{ env.RELEASE_TAG }}
+          base: master
+          assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
+          delete-branch: true
+          title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
+          labels: do not test
+          body: |
+            Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
+            ### Changelog category (leave one):
+            - Not for changelog (changelog entry is not required)
+      - name: Complete previous steps and Restore git state
+        if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --set-progress-completed
+          git reset --hard HEAD
+          git checkout "$GITHUB_REF_NAME"
+      - name: Create GH Release
+        if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Export TGZ Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Test TGZ Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Export RPM Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Test RPM Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Export Debian Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Test Debian Packages
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Docker clickhouse/clickhouse-server building
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          cd "./tests/ci"
+          python3 ./create_release.py --set-progress-started --progress "docker server release"
+          export CHECK_NAME="Docker server image"
+          python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+          python3 ./create_release.py --set-progress-completed
+      - name: Docker clickhouse/clickhouse-keeper building
+        if: ${{ inputs.type == 'patch' }}
+        shell: bash
+        run: |
+          cd "./tests/ci"
+          python3 ./create_release.py --set-progress-started --progress "docker keeper release"
+          export CHECK_NAME="Docker keeper image"
+          python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+          python3 ./create_release.py --set-progress-completed
+      - name: Update release info. Merge created PRs
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --merge-prs ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Set current Release progress to Completed with OK
+        shell: bash
+        run: |
+          # dummy stage to finalize release info with "progress: completed; status: OK"
+          python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
+          python3 ./tests/ci/create_release.py --set-progress-completed
+      - name: Post Slack Message
+        if: ${{ !cancelled() }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run == true && '--dry-run' || '' }}
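The rewritten changelog step mounts the checkout at a neutral path and sets the working directory, so the containerized script runs with repo-relative paths instead of the hardcoded /ClickHouse prefix the old action used. A rough standalone sketch of the same bind-mount pattern (the tag value is hypothetical):

    # Mount the current checkout at /wd and run a repo script from inside it.
    docker run --rm -u "$(id -u):$(id -g)" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
      --volume="$PWD:/wd" --workdir="/wd" \
      clickhouse/style-test \
      ./tests/ci/changelog.py -v --jobs=5 \
      --output="./docs/changelogs/v24.8.1.1-test.md" v24.8.1.1-test   # hypothetical tag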
1  .github/workflows/jepsen.yml  (vendored)
@@ -64,6 +64,7 @@ jobs:
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
       - name: Check Workflow results
         if: ${{ !cancelled() }}
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
           cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
7  .github/workflows/merge_queue.yml  (vendored)
@@ -103,9 +103,14 @@ jobs:
       - name: Check and set merge status
         if: ${{ needs.StyleCheck.result == 'success' }}
         run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+          python3 merge_pr.py --set-ci-status
       - name: Check Workflow results
         if: ${{ !cancelled() }}
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
           cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
1  .github/workflows/nightly.yml  (vendored)
@@ -52,6 +52,7 @@ jobs:
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
       - name: Check Workflow results
         if: ${{ !cancelled() }}
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
           cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
18  .github/workflows/pull_request.yml  (vendored)
@@ -142,8 +142,13 @@ jobs:
   # Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3)
   Builds_Report:
     # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
-    needs: [RunConfig, StyleCheck, Builds_1, Builds_2]
+    if: ${{ !cancelled()
+      && needs.RunConfig.result == 'success'
+      && needs.StyleCheck.result != 'failure'
+      && needs.FastTest.result != 'failure'
+      && needs.BuildDockers.result != 'failure'
+      && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2]
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Builds

@@ -165,14 +170,15 @@
         if: ${{ needs.StyleCheck.result == 'success' }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
-      - name: Check Workflow results
-        run: |
-          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
-          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
-          ${{ toJson(needs) }}
-          EOF
-          python3 ./tests/ci/ci_buddy.py --check-wf-status
+          python3 merge_pr.py --set-ci-status
+      - name: Check Workflow results
+        uses: ./.github/actions/check_workflow
+        with:
+          needs: ${{ toJson(needs) }}

   ################################# Stage Final #################################
   #
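Both status paths reduce the needs context to pass/fail. The retired inline form did it in the workflow expression (`contains(needs.*.result, 'failure')`); with the results file on disk the same question can be asked of the JSON directly. A hedged jq sketch (hypothetical file contents) of an equivalent check:

    # Hypothetical: does any upstream job in the dumped needs context report failure?
    if jq -e '[.[].result] | any(. == "failure")' /tmp/workflow_results.json > /dev/null; then
      echo "wf-status: failure"
    else
      echo "wf-status: success"
    fi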
69  .github/workflows/release.yml  (vendored, deleted)
@@ -1,69 +0,0 @@
name: PublishedReleaseCI
# - Gets artifacts from S3
# - Sends it to JFROG Artifactory
# - Adds them to the release assets

on:  # yamllint disable-line rule:truthy
  release:
    types:
      - published
  workflow_dispatch:
    inputs:
      tag:
        description: 'Release tag'
        required: true
        type: string

jobs:
  ReleasePublish:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Set tag from input
        if: github.event_name == 'workflow_dispatch'
        run: |
          echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
      - name: Set tag from REF
        if: github.event_name == 'release'
        run: |
          echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
      - name: Deploy packages and assets
        run: |
          curl --silent --data '' --no-buffer \
            '${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true'
  ############################################################################################
  ##################################### Docker images #######################################
  ############################################################################################
  DockerServerImages:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Set tag from input
        if: github.event_name == 'workflow_dispatch'
        run: |
          echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
      - name: Set tag from REF
        if: github.event_name == 'release'
        run: |
          echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          fetch-depth: 0  # otherwise we will have no version info
          filter: tree:0
          ref: ${{ env.GITHUB_TAG }}
      - name: Check docker clickhouse/clickhouse-server building
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          export CHECK_NAME="Docker server image"
          python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
      - name: Check docker clickhouse/clickhouse-keeper building
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          export CHECK_NAME="Docker keeper image"
          python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
4  .github/workflows/release_branches.yml  (vendored)
@@ -481,12 +481,10 @@ jobs:
       - name: Finish label
         if: ${{ !failure() }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          # update mergeable check
-          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
-          # update overall ci report
-          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
       - name: Check Workflow results
         if: ${{ !cancelled() }}
         run: |
           export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
           cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
74  .github/workflows/tags_stable.yml  (vendored, deleted)
@@ -1,74 +0,0 @@
name: TagsStableWorkflow
# - Gets artifacts from S3
# - Sends it to JFROG Artifactory
# - Adds them to the release assets

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1

on:  # yamllint disable-line rule:truthy
  push:
    tags:
      - 'v*-prestable'
      - 'v*-stable'
      - 'v*-lts'
  workflow_dispatch:
    inputs:
      tag:
        description: 'Test tag'
        required: true
        type: string

jobs:
  UpdateVersions:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Set test tag
        if: github.event_name == 'workflow_dispatch'
        run: |
          echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
      - name: Get tag name
        if: github.event_name != 'workflow_dispatch'
        run: |
          echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          ref: master
          fetch-depth: 0
          filter: tree:0
      - name: Update versions, docker version, changelog, security
        env:
          GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
        run: |
          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
          ./utils/list-versions/update-docker-version.sh
          GID=$(id -g "${UID}")
          # --network=host and CI=1 are required for the S3 access from a container
          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
            --volume="${GITHUB_WORKSPACE}:/ClickHouse" clickhouse/style-test \
            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
            --gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
            --output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
          git add "./docs/changelogs/${GITHUB_TAG}.md"
          python3 ./utils/security-generator/generate_security.py > SECURITY.md
          git diff HEAD
      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v6
        with:
          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
          token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
          commit-message: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
          branch: auto/${{ env.GITHUB_TAG }}
          assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
          delete-branch: true
          title: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
          labels: do not test
          body: |
            Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}

            ### Changelog category (leave one):
            - Not for changelog (changelog entry is not required)
@@ -5,12 +5,6 @@ rules:
   indentation:
     level: warning
     indent-sequences: consistent
-  line-length:
-    # there are:
-    #  - bash -c "", so this is OK
-    #  - yaml in tests
-    max: 1000
-    level: warning
   comments:
     min-spaces-from-content: 1
   document-start: disable
@@ -64,6 +64,7 @@
 * The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)).
 * Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
 * Support reading partitioned data DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)).
 * In composable protocols TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)).
 * Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)).
@@ -19,6 +19,7 @@

 #include "Poco/Foundation.h"
+#include <Poco/Types.h>

 namespace Poco

@@ -135,6 +136,12 @@ public:
     static const UUID & x500();
         /// Returns the namespace identifier for the X500 namespace.

+    UInt32 getTimeLow() const { return _timeLow; }
+    UInt16 getTimeMid() const { return _timeMid; }
+    UInt16 getTimeHiAndVersion() const { return _timeHiAndVersion; }
+    UInt16 getClockSeq() const { return _clockSeq; }
+    std::array<UInt8, 6> getNode() const { return std::array<UInt8, 6>{_node[0], _node[1], _node[2], _node[3], _node[4], _node[5]}; }
+
 protected:
     UUID(UInt32 timeLow, UInt32 timeMid, UInt32 timeHiAndVersion, UInt16 clockSeq, UInt8 node[]);
     UUID(const char * bytes, Version version);
@@ -76,13 +76,13 @@ std::string Binary::toString(int indent) const

 UUID Binary::uuid() const
 {
-    if (_subtype == 0x04 && _buffer.size() == 16)
+    if ((_subtype == 0x04 || _subtype == 0x03) && _buffer.size() == 16)
     {
         UUID uuid;
         uuid.copyFrom((const char*) _buffer.begin());
         return uuid;
     }
-    throw BadCastException("Invalid subtype");
+    throw BadCastException("Invalid subtype: " + std::to_string(_subtype) + ", size: " + std::to_string(_buffer.size()));
 }
2  contrib/NuRaft  (vendored)
@@ -1 +1 @@
-Subproject commit cb5dc3c906e80f253e9ce9535807caef827cc2e0
+Subproject commit c2b0811f164a7948208489562dab4f186eb305ce
@@ -4,9 +4,7 @@ else ()
   option(ENABLE_ICU "Enable ICU" 0)
 endif ()

-# Temporarily disabled s390x because the ICU build links a blob (icudt71b_dat.S) and our friends from IBM did not explain how they generated
-# the blob on s390x: https://github.com/ClickHouse/icudata/pull/2#issuecomment-2226957255
-if (NOT ENABLE_ICU OR ARCH_S390X)
+if (NOT ENABLE_ICU)
     message(STATUS "Not using ICU")
     return()
 endif()
2  contrib/icudata  (vendored)
@@ -1 +1 @@
-Subproject commit d345d6ac22f381c882420de9053d30ae1ff38d75
+Subproject commit 4904951339a70b4814d2d3723436b20d079cb01b
2  contrib/rocksdb  (vendored)
@@ -1 +1 @@
-Subproject commit 01e43568fa9f3f7bf107b2b66c00b286b456f33e
+Subproject commit 49ce8a1064dd1ad89117899839bf136365e49e79
@@ -1,6 +1,6 @@
 option (ENABLE_ROCKSDB "Enable RocksDB" ${ENABLE_LIBRARIES})

-if (NOT ENABLE_ROCKSDB)
+if (NOT ENABLE_ROCKSDB OR NO_SSE3_OR_HIGHER) # assumes SSE4.2 and PCLMUL
     message (STATUS "Not using RocksDB")
     return()
 endif()

@@ -39,13 +39,6 @@ if(WITH_ZSTD)
     list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
 endif()

 add_definitions(-DROCKSDB_PORTABLE)

 if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
     add_definitions(-DHAVE_SSE42)
     add_definitions(-DHAVE_PCLMUL)
 endif()

 if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
     set (HAS_ARMV8_CRC 1)
     # the original build descriptions set specific flags for ARM. These flags are already subsumed by ClickHouse's general

@@ -91,7 +84,9 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/cache/compressed_secondary_cache.cc
     ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
     ${ROCKSDB_SOURCE_DIR}/cache/secondary_cache.cc
+    ${ROCKSDB_SOURCE_DIR}/cache/secondary_cache_adapter.cc
     ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
+    ${ROCKSDB_SOURCE_DIR}/cache/tiered_secondary_cache.cc
     ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc
     ${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc

@@ -174,9 +169,11 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc
     ${ROCKSDB_SOURCE_DIR}/db/wide/wide_column_serialization.cc
     ${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns.cc
+    ${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns_helper.cc
     ${ROCKSDB_SOURCE_DIR}/db/write_batch.cc
     ${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc
     ${ROCKSDB_SOURCE_DIR}/db/write_controller.cc
+    ${ROCKSDB_SOURCE_DIR}/db/write_stall_stats.cc
     ${ROCKSDB_SOURCE_DIR}/db/write_thread.cc
     ${ROCKSDB_SOURCE_DIR}/env/composite_env.cc
     ${ROCKSDB_SOURCE_DIR}/env/env.cc

@@ -229,6 +226,7 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/options/configurable.cc
     ${ROCKSDB_SOURCE_DIR}/options/customizable.cc
     ${ROCKSDB_SOURCE_DIR}/options/db_options.cc
+    ${ROCKSDB_SOURCE_DIR}/options/offpeak_time_info.cc
    ${ROCKSDB_SOURCE_DIR}/options/options.cc
     ${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
     ${ROCKSDB_SOURCE_DIR}/options/options_parser.cc

@@ -268,6 +266,7 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/table/get_context.cc
     ${ROCKSDB_SOURCE_DIR}/table/iterator.cc
     ${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc
+    ${ROCKSDB_SOURCE_DIR}/table/compaction_merging_iterator.cc
     ${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc
     ${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc
     ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc

@@ -309,6 +308,7 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
     ${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
     ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
+    ${ROCKSDB_SOURCE_DIR}/util/data_structure.cc
     ${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc
     ${ROCKSDB_SOURCE_DIR}/util/hash.cc
     ${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc

@@ -322,6 +322,8 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/util/string_util.cc
     ${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
     ${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
+    ${ROCKSDB_SOURCE_DIR}/util/udt_util.cc
+    ${ROCKSDB_SOURCE_DIR}/util/write_batch_util.cc
     ${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/agg_merge/agg_merge.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/backup/backup_engine.cc

@@ -404,12 +406,6 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
     build_version.cc) # generated by hand

-if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
-    set_source_files_properties(
-        "${ROCKSDB_SOURCE_DIR}/util/crc32c.cc"
-        PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul")
-endif()
-
 if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
     list(APPEND SOURCES
         "${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc.c"
@@ -26,7 +26,6 @@ sed -i '/onBrokenMarkdownLinks:/ s/ignore/error/g' docusaurus.config.js

 if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
     export CI=true
     yarn install
     exec yarn build "$@"
 fi
@@ -35,7 +35,9 @@ ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'
 ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'
 ENV LSAN_OPTIONS='max_allocation_size_mb=32768'

-# for external_symbolizer_path
+# for external_symbolizer_path, and also ensure that llvm-symbolizer really
+# exists (since you don't want to fallback to addr2line, it is very slow)
+RUN test -f /usr/bin/llvm-symbolizer-${LLVM_VERSION}
 RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer

 RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
@@ -218,6 +218,6 @@ function stop_logs_replication
     clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | {
         tee /dev/stderr
     } | {
-        xargs -n1 -r -i clickhouse-client --query "drop table {}"
+        timeout --preserve-status --signal TERM --kill-after 5m 15m xargs -n1 -r -i clickhouse-client --query "drop table {}"
     }
 }
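The added `timeout` wrapper bounds the whole drop loop. A minimal sketch of the flag semantics with a hypothetical command: send TERM after 15m, escalate to KILL 5m later if the process is still alive, and exit with the command's own status rather than timeout's 124.

    # Hypothetical long-running command bounded the same way:
    timeout --preserve-status --signal TERM --kill-after 5m 15m \
      sh -c 'sleep 3600'   # TERM at 15m; KILL at 20m if it ignores TERM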
@@ -41,7 +41,7 @@ export FASTTEST_WORKSPACE
 export FASTTEST_SOURCE
 export FASTTEST_BUILD
 export FASTTEST_DATA
-export FASTTEST_OUT
+export FASTTEST_OUTPUT
 export PATH

 function ccache_status
@@ -28,9 +28,9 @@
     </table_function_remote_max_addresses>

     <!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
-    <allow_experimental_analyzer>
+    <enable_analyzer>
         <readonly/>
-    </allow_experimental_analyzer>
+    </enable_analyzer>

     <!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
     <allow_experimental_object_type>
@@ -11,7 +11,8 @@ function attach_gdb_to_clickhouse()
     # explicitly ignore non-fatal signals that are used by server.
     # Number of SIGRTMIN can be determined only in runtime.
     RTMIN=$(kill -l SIGRTMIN)
-    echo "
+    # shellcheck disable=SC2016
+    echo "
 set follow-fork-mode parent
 handle SIGHUP nostop noprint pass
 handle SIGINT nostop noprint pass

@@ -24,8 +25,11 @@ handle SIG$RTMIN nostop noprint pass
 info signals
 continue
 backtrace full
-thread apply all backtrace full
 info registers
+p "top 1 KiB of the stack:"
+p/x *(uint64_t[128]*)"'$sp'"
+maintenance info sections
+thread apply all backtrace full
 disassemble /s
 up
 disassemble /s
@@ -3,6 +3,12 @@
 # shellcheck disable=SC1091
 source /setup_export_logs.sh

+# shellcheck source=../stateless/stress_tests.lib
+source /stress_tests.lib
+
+# Avoid overlaps with previous runs
+dmesg --clear
+
 # fail on errors, verbose and export all env variables
 set -e -x -a

@@ -72,8 +78,12 @@ if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; th
     remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+"
 fi

+export IS_FLAKY_CHECK=0
+
 # For flaky check we also enable thread fuzzer
 if [ "$NUM_TRIES" -gt "1" ]; then
+    export IS_FLAKY_CHECK=1
+
     export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
     export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
     export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000

@@ -212,6 +222,10 @@ function run_tests()
         ADDITIONAL_OPTIONS+=('--shared-catalog')
     fi

+    if [[ "$USE_DISTRIBUTED_CACHE" -eq 1 ]]; then
+        ADDITIONAL_OPTIONS+=('--distributed-cache')
+    fi
+
     if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
         ADDITIONAL_OPTIONS+=('--replicated-database')
         # Too many tests fail for DatabaseReplicated in parallel.

@@ -256,7 +270,7 @@ function run_tests()
         | ts '%Y-%m-%d %H:%M:%S' \
         | tee -a test_output/test_result.txt
     set -e
-    DURATION=$((START_TIME - SECONDS))
+    DURATION=$((SECONDS - START_TIME))

     echo "Elapsed ${DURATION} seconds."
     if [[ $DURATION -ge $TIMEOUT ]]
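The DURATION change fixes a sign error: bash's builtin SECONDS counts up from shell start, so elapsed time is SECONDS minus the recorded start, never the reverse. A minimal sketch:

    # $SECONDS increments once per second since the shell started.
    START_TIME=$SECONDS
    sleep 3                               # stand-in for the real work
    DURATION=$((SECONDS - START_TIME))    # 3; (START_TIME - SECONDS) would be -3
    echo "Elapsed ${DURATION} seconds."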
@@ -295,22 +309,22 @@ stop_logs_replication

 failed_to_save_logs=0
 for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
 do
-    err=$(clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.gz' format TSVWithNamesAndTypes")
-    echo "$err"
-    [[ "0" != "${#err}" ]] && failed_to_save_logs=1
+    if ! clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.zst' format TSVWithNamesAndTypes"; then
+        failed_to_save_logs=1
+    fi
     if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
-        err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
-        echo "$err"
-        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
-        err=$( { clickhouse-client --port 29000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst; } 2>&1 )
-        echo "$err"
-        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
+        if ! clickhouse-client --port 19000 -q "select * from system.$table into outfile '/test_output/$table.1.tsv.zst' format TSVWithNamesAndTypes"; then
+            failed_to_save_logs=1
+        fi
+        if ! clickhouse-client --port 29000 -q "select * from system.$table into outfile '/test_output/$table.2.tsv.zst' format TSVWithNamesAndTypes"; then
+            failed_to_save_logs=1
+        fi
     fi

     if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
-        err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
-        echo "$err"
-        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
+        if ! clickhouse-client --port 29000 -q "select * from system.$table into outfile '/test_output/$table.2.tsv.zst' format TSVWithNamesAndTypes"; then
+            failed_to_save_logs=1
+        fi
     fi
 done
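The rewrite swaps the pipe-through-zstd capture for ClickHouse's INTO OUTFILE, which compresses on the client side based on the file extension ('.zst' here), and detects failure from the exit status instead of grepping captured stderr. A minimal sketch with hypothetical paths:

    # Hypothetical: client-side compressed export; failure is read from the
    # exit status rather than from parsed error text.
    if ! clickhouse-client -q "SELECT * FROM system.query_log INTO OUTFILE '/tmp/query_log.tsv.zst' FORMAT TSVWithNamesAndTypes"; then
      echo "failed to save query_log" >&2
    fi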
@@ -383,6 +397,8 @@ do
         | zstd --threads=0 > "/test_output/trace-log-$trace_type-flamegraph.tsv.zst" ||:
 done

+# Grep logs for sanitizer asserts, crashes and other critical errors
+check_logs_for_critical_errors
+
 # Compressed (FIXME: remove once only github actions will be left)
 rm /var/log/clickhouse-server/clickhouse-server.log
@@ -139,9 +139,9 @@ EOL
     </table_function_remote_max_addresses>

     <!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
-    <allow_experimental_analyzer>
+    <enable_analyzer>
         <readonly/>
-    </allow_experimental_analyzer>
+    </enable_analyzer>

     <!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
     <allow_experimental_object_type>

@@ -242,7 +242,7 @@ function check_server_start()
 function check_logs_for_critical_errors()
 {
     # Sanitizer asserts
-    sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr.log >> /test_output/tmp
+    sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr*.log >> /test_output/tmp
     rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
         && echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
         || echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv
@@ -5,14 +5,6 @@ FROM ubuntu:22.04
 ARG apt_archive="http://archive.ubuntu.com"
 RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

-# FIXME: rebuild for clang 18.1.3, that contains a workaround [1] for
-# sanitizers issue [2]:
-#
-#   $ git tag --contains c2a57034eff048cd36c563c8e0051db3a70991b3 | tail -1
-#   llvmorg-18.1.3
-#
-# [1]: https://github.com/llvm/llvm-project/commit/c2a57034eff048cd36c563c8e0051db3a70991b3
-# [2]: https://github.com/ClickHouse/ClickHouse/issues/64086
 ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18

 RUN apt-get update \
@@ -161,11 +161,11 @@ def process_result(result_path, broken_tests):
         retries,
         test_results,
     ) = process_test_log(result_path, broken_tests)
-    is_flacky_check = 1 < int(os.environ.get("NUM_TRIES", 1))
-    logging.info("Is flaky check: %s", is_flacky_check)
+    is_flaky_check = 1 < int(os.environ.get("NUM_TRIES", 1))
+    logging.info("Is flaky check: %s", is_flaky_check)
     # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately)
     # But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped.
-    if failed != 0 or unknown != 0 or (success == 0 and (not is_flacky_check)):
+    if failed != 0 or unknown != 0 or (success == 0 and (not is_flaky_check)):
         state = "failure"

     if hung:
@@ -331,7 +331,7 @@
 * Fix several non significant errors in unit tests. [#11262](https://github.com/ClickHouse/ClickHouse/pull/11262) ([alesapin](https://github.com/alesapin)).
 * Add a test for Join table engine from @donmikel. This closes [#9158](https://github.com/ClickHouse/ClickHouse/issues/9158). [#11265](https://github.com/ClickHouse/ClickHouse/pull/11265) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Repeat test in CI if `curl` invocation was timed out. It is possible due to system hangups for 10+ seconds that are typical in our CI infrastructure. This fixes [#11267](https://github.com/ClickHouse/ClickHouse/issues/11267). [#11268](https://github.com/ClickHouse/ClickHouse/pull/11268) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix potentially flacky test `00731_long_merge_tree_select_opened_files.sh`. It does not fail frequently but we have discovered potential race condition in this test while experimenting with ThreadFuzzer: [#9814](https://github.com/ClickHouse/ClickHouse/issues/9814) See [link](https://clickhouse-test-reports.s3.yandex.net/9814/40e3023e215df22985d275bf85f4d2290897b76b/functional_stateless_tests_(unbundled).html#fail1) for the example. [#11270](https://github.com/ClickHouse/ClickHouse/pull/11270) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix potentially flaky test `00731_long_merge_tree_select_opened_files.sh`. It does not fail frequently but we have discovered potential race condition in this test while experimenting with ThreadFuzzer: [#9814](https://github.com/ClickHouse/ClickHouse/issues/9814) See [link](https://clickhouse-test-reports.s3.yandex.net/9814/40e3023e215df22985d275bf85f4d2290897b76b/functional_stateless_tests_(unbundled).html#fail1) for the example. [#11270](https://github.com/ClickHouse/ClickHouse/pull/11270) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Now clickhouse-test check the server aliveness before tests run. [#11285](https://github.com/ClickHouse/ClickHouse/pull/11285) ([alesapin](https://github.com/alesapin)).
 * Emit a warning if server was build in debug or with sanitizers. [#11304](https://github.com/ClickHouse/ClickHouse/pull/11304) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Better check for hung queries in clickhouse-test. [#11321](https://github.com/ClickHouse/ClickHouse/pull/11321) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@ -280,7 +280,7 @@ sidebar_label: 2022
|
||||
* Cleanup unbundled image [#29689](https://github.com/ClickHouse/ClickHouse/pull/29689) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix memory tracking for merges and mutations [#29691](https://github.com/ClickHouse/ClickHouse/pull/29691) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix data-race in WriteIndirectBuffer (used in DiskMemory) [#29692](https://github.com/ClickHouse/ClickHouse/pull/29692) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix flacky test [#29706](https://github.com/ClickHouse/ClickHouse/pull/29706) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix flaky test [#29706](https://github.com/ClickHouse/ClickHouse/pull/29706) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* BorrowedObjectPool condition variable notify fix [#29722](https://github.com/ClickHouse/ClickHouse/pull/29722) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Better exception message for local interactive [#29737](https://github.com/ClickHouse/ClickHouse/pull/29737) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix --stage for clickhouse-local [#29745](https://github.com/ClickHouse/ClickHouse/pull/29745) ([Azat Khuzhin](https://github.com/azat)).
|
||||
@ -308,7 +308,7 @@ sidebar_label: 2022
|
||||
* Fix client [#29864](https://github.com/ClickHouse/ClickHouse/pull/29864) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Remove some more streams. [#29898](https://github.com/ClickHouse/ClickHouse/pull/29898) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Add logging in ZooKeeper client [#29901](https://github.com/ClickHouse/ClickHouse/pull/29901) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix some flacky tests [#29902](https://github.com/ClickHouse/ClickHouse/pull/29902) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix some flaky tests [#29902](https://github.com/ClickHouse/ClickHouse/pull/29902) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Grep server log even if it contains binary data [#29903](https://github.com/ClickHouse/ClickHouse/pull/29903) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Cosmetic refactoring of server constants. [#29913](https://github.com/ClickHouse/ClickHouse/pull/29913) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Format improvement of AlterQuery [#29916](https://github.com/ClickHouse/ClickHouse/pull/29916) ([flynn](https://github.com/ucasfl)).
|
||||
@ -465,7 +465,7 @@ sidebar_label: 2022
|
||||
* Fix docs release [#30933](https://github.com/ClickHouse/ClickHouse/pull/30933) ([alesapin](https://github.com/alesapin)).
* Fix style check [#30937](https://github.com/ClickHouse/ClickHouse/pull/30937) ([alesapin](https://github.com/alesapin)).
* Fix file progress for clickhouse-local [#30938](https://github.com/ClickHouse/ClickHouse/pull/30938) ([Kseniia Sumarokova](https://github.com/kssenii)).
- * Fix flacky test [#30940](https://github.com/ClickHouse/ClickHouse/pull/30940) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Fix flaky test [#30940](https://github.com/ClickHouse/ClickHouse/pull/30940) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix reading from TinyLog [#30941](https://github.com/ClickHouse/ClickHouse/pull/30941) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add github to known hosts in docs release [#30947](https://github.com/ClickHouse/ClickHouse/pull/30947) ([alesapin](https://github.com/alesapin)).
* Parse json from response in ci checks [#30948](https://github.com/ClickHouse/ClickHouse/pull/30948) ([alesapin](https://github.com/alesapin)).
@ -220,7 +220,7 @@ sidebar_label: 2022
* Fix test_backward_compatibility [#30950](https://github.com/ClickHouse/ClickHouse/pull/30950) ([Ilya Yatsishin](https://github.com/qoega)).
* Add stress test to github actions [#30952](https://github.com/ClickHouse/ClickHouse/pull/30952) ([alesapin](https://github.com/alesapin)).
* Try smaller blacklist of non parallel integration tests [#30963](https://github.com/ClickHouse/ClickHouse/pull/30963) ([Ilya Yatsishin](https://github.com/qoega)).
- * Fix flacky test [#30967](https://github.com/ClickHouse/ClickHouse/pull/30967) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Fix flaky test [#30967](https://github.com/ClickHouse/ClickHouse/pull/30967) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Move access-rights source code [#30973](https://github.com/ClickHouse/ClickHouse/pull/30973) ([Vitaly Baranov](https://github.com/vitlibar)).
* Set output_format_avro_rows_in_file default to 1 [#30990](https://github.com/ClickHouse/ClickHouse/pull/30990) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove remaining usages of Y_IGNORE [#30993](https://github.com/ClickHouse/ClickHouse/pull/30993) ([Yuriy Chernyshov](https://github.com/georgthegreat)).
@ -353,7 +353,7 @@ sidebar_label: 2022
* Support toUInt8/toInt8 for if constant condition optimization. [#31866](https://github.com/ClickHouse/ClickHouse/pull/31866) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
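As a rough, hypothetical illustration of what this optimization covers (the queries below are not from the PR): when the condition of `if` is a constant `toUInt8`/`toInt8` expression, the branch can be chosen at query analysis time.

```sql
-- The condition folds to a constant, so the whole if() can collapse
-- to the selected branch during query analysis.
SELECT if(toUInt8(1), 'yes', 'no');  -- optimized to 'yes'
SELECT if(toInt8(0), 'yes', 'no');   -- optimized to 'no'
```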
* Added -no-sanitize=unsigned-integer-overflow build flag [#31881](https://github.com/ClickHouse/ClickHouse/pull/31881) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix typos [#31886](https://github.com/ClickHouse/ClickHouse/pull/31886) ([Anton Popov](https://github.com/CurtizJ)).
- * Try to fix flacky test. [#31889](https://github.com/ClickHouse/ClickHouse/pull/31889) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+ * Try to fix flaky test. [#31889](https://github.com/ClickHouse/ClickHouse/pull/31889) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Reduce the files that depend on parser headers [#31896](https://github.com/ClickHouse/ClickHouse/pull/31896) ([Raúl Marín](https://github.com/Algunenano)).
* Fix magic_enum for debug helpers (fixes build w/ USE_DEBUG_HELPERS) [#31922](https://github.com/ClickHouse/ClickHouse/pull/31922) ([Azat Khuzhin](https://github.com/azat)).
* Remove some trash from build [#31923](https://github.com/ClickHouse/ClickHouse/pull/31923) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@ -387,7 +387,7 @@ sidebar_label: 2022
* make looping in H3 funcs uniform [#32110](https://github.com/ClickHouse/ClickHouse/pull/32110) ([Bharat Nallan](https://github.com/bharatnc)).
* Remove PVS check from master [#32114](https://github.com/ClickHouse/ClickHouse/pull/32114) ([alesapin](https://github.com/alesapin)).
* Fix flaky keeper whitelist test [#32115](https://github.com/ClickHouse/ClickHouse/pull/32115) ([alesapin](https://github.com/alesapin)).
- * Fix flacky test test_executable_storage_input [#32118](https://github.com/ClickHouse/ClickHouse/pull/32118) ([Maksim Kita](https://github.com/kitaisreal)).
+ * Fix flaky test test_executable_storage_input [#32118](https://github.com/ClickHouse/ClickHouse/pull/32118) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix data race in `removePartAndEnqueueFetch(...)` [#32119](https://github.com/ClickHouse/ClickHouse/pull/32119) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Move fuzzers and unit tests to another group [#32120](https://github.com/ClickHouse/ClickHouse/pull/32120) ([alesapin](https://github.com/alesapin)).
* Add a test with 20000 mutations in one query [#32122](https://github.com/ClickHouse/ClickHouse/pull/32122) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
@ -411,11 +411,11 @@ sidebar_label: 2022
* Add test for [#32186](https://github.com/ClickHouse/ClickHouse/issues/32186) [#32203](https://github.com/ClickHouse/ClickHouse/pull/32203) ([Raúl Marín](https://github.com/Algunenano)).
* Fix uncaught exception in DatabaseLazy [#32206](https://github.com/ClickHouse/ClickHouse/pull/32206) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update ASTCreateQuery.cpp [#32208](https://github.com/ClickHouse/ClickHouse/pull/32208) ([Kseniia Sumarokova](https://github.com/kssenii)).
- * Fix flacky fileLog test (probably) [#32209](https://github.com/ClickHouse/ClickHouse/pull/32209) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Fix flaky fileLog test (probably) [#32209](https://github.com/ClickHouse/ClickHouse/pull/32209) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix jemalloc under osx [#32219](https://github.com/ClickHouse/ClickHouse/pull/32219) ([Azat Khuzhin](https://github.com/azat)).
* Add missing timezones to some tests [#32222](https://github.com/ClickHouse/ClickHouse/pull/32222) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix versioning of aggregate functions (fixes performance tests) [#32236](https://github.com/ClickHouse/ClickHouse/pull/32236) ([Azat Khuzhin](https://github.com/azat)).
- * Disable window view tests temporarily because still flacky [#32257](https://github.com/ClickHouse/ClickHouse/pull/32257) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Disable window view tests temporarily because still flaky [#32257](https://github.com/ClickHouse/ClickHouse/pull/32257) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix typo in tupleToNameValuePairs doc [#32262](https://github.com/ClickHouse/ClickHouse/pull/32262) ([Vladimir C](https://github.com/vdimir)).
* Fix possible Pipeline stuck in case of StrictResize processor. [#32270](https://github.com/ClickHouse/ClickHouse/pull/32270) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible crash in DataTypeAggregateFunction [#32287](https://github.com/ClickHouse/ClickHouse/pull/32287) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
@ -158,7 +158,7 @@ sidebar_label: 2022
* MemoryStorage sync comments and code [#22721](https://github.com/ClickHouse/ClickHouse/pull/22721) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix potential segfault on Keeper startup [#22743](https://github.com/ClickHouse/ClickHouse/pull/22743) ([alesapin](https://github.com/alesapin)).
* Avoid using harmful function rand() [#22744](https://github.com/ClickHouse/ClickHouse/pull/22744) ([Amos Bird](https://github.com/amosbird)).
- * Fix flacky hedged tests [#22746](https://github.com/ClickHouse/ClickHouse/pull/22746) ([Kruglov Pavel](https://github.com/Avogar)).
+ * Fix flaky hedged tests [#22746](https://github.com/ClickHouse/ClickHouse/pull/22746) ([Kruglov Pavel](https://github.com/Avogar)).
* add more messages when flushing the logs [#22761](https://github.com/ClickHouse/ClickHouse/pull/22761) ([Alexander Kuzmenkov](https://github.com/akuzm)).
* Moved BorrowedObjectPool to common [#22764](https://github.com/ClickHouse/ClickHouse/pull/22764) ([Maksim Kita](https://github.com/kitaisreal)).
* Functions ExternalDictionaries standardize exception throw [#22821](https://github.com/ClickHouse/ClickHouse/pull/22821) ([Maksim Kita](https://github.com/kitaisreal)).
@ -55,7 +55,7 @@ sidebar_label: 2022
* Try fix rabbitmq tests [#26826](https://github.com/ClickHouse/ClickHouse/pull/26826) ([Kseniia Sumarokova](https://github.com/kssenii)).
* One more library bridge fix [#26873](https://github.com/ClickHouse/ClickHouse/pull/26873) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update PVS checksum [#27317](https://github.com/ClickHouse/ClickHouse/pull/27317) ([Alexander Tokmakov](https://github.com/tavplubix)).
- * Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix throw without exception in MySQL source. [#28027](https://github.com/ClickHouse/ClickHouse/pull/28027) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix race between REPLACE PARTITION and MOVE PARTITION [#28035](https://github.com/ClickHouse/ClickHouse/pull/28035) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow-up to [#28016](https://github.com/ClickHouse/ClickHouse/issues/28016) [#28036](https://github.com/ClickHouse/ClickHouse/pull/28036) ([Alexander Tokmakov](https://github.com/tavplubix)).
@ -35,7 +35,7 @@ sidebar_label: 2022
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix prometheus metric name [#26140](https://github.com/ClickHouse/ClickHouse/pull/26140) ([Vladimir C](https://github.com/vdimir)).
- * Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix throw without exception in MySQL source. [#28027](https://github.com/ClickHouse/ClickHouse/pull/28027) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix race between REPLACE PARTITION and MOVE PARTITION [#28035](https://github.com/ClickHouse/ClickHouse/pull/28035) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow-up to [#28016](https://github.com/ClickHouse/ClickHouse/issues/28016) [#28036](https://github.com/ClickHouse/ClickHouse/pull/28036) ([Alexander Tokmakov](https://github.com/tavplubix)).
@ -101,7 +101,7 @@ sidebar_label: 2022
* Separate log files for separate runs in stress test [#25741](https://github.com/ClickHouse/ClickHouse/pull/25741) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix slow performance test [#25742](https://github.com/ClickHouse/ClickHouse/pull/25742) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* DatabaseAtomic EXCHANGE DICTIONARIES fix test [#25753](https://github.com/ClickHouse/ClickHouse/pull/25753) ([Maksim Kita](https://github.com/kitaisreal)).
- * Try fix flacky rabbitmq test [#25756](https://github.com/ClickHouse/ClickHouse/pull/25756) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Try fix flaky rabbitmq test [#25756](https://github.com/ClickHouse/ClickHouse/pull/25756) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a test for [#13993](https://github.com/ClickHouse/ClickHouse/issues/13993) [#25758](https://github.com/ClickHouse/ClickHouse/pull/25758) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Set follow-fork-mode child for gdb in stress/fasttest/fuzzer [#25769](https://github.com/ClickHouse/ClickHouse/pull/25769) ([Azat Khuzhin](https://github.com/azat)).
* Ignore TOO_DEEP_RECURSION server exception during fuzzing [#25770](https://github.com/ClickHouse/ClickHouse/pull/25770) ([Azat Khuzhin](https://github.com/azat)).
@ -40,7 +40,7 @@ sidebar_label: 2022
* Fix several bugs in ZooKeeper snapshots deserialization [#26127](https://github.com/ClickHouse/ClickHouse/pull/26127) ([alesapin](https://github.com/alesapin)).
* Fix prometheus metric name [#26140](https://github.com/ClickHouse/ClickHouse/pull/26140) ([Vladimir C](https://github.com/vdimir)).
- * Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix throw without exception in MySQL source. [#28027](https://github.com/ClickHouse/ClickHouse/pull/28027) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix race between REPLACE PARTITION and MOVE PARTITION [#28035](https://github.com/ClickHouse/ClickHouse/pull/28035) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow-up to [#28016](https://github.com/ClickHouse/ClickHouse/issues/28016) [#28036](https://github.com/ClickHouse/ClickHouse/pull/28036) ([Alexander Tokmakov](https://github.com/tavplubix)).
@ -346,7 +346,7 @@ sidebar_label: 2022
* Update PVS checksum [#27317](https://github.com/ClickHouse/ClickHouse/pull/27317) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix 01300_client_save_history_when_terminated_long [#27324](https://github.com/ClickHouse/ClickHouse/pull/27324) ([Raúl Marín](https://github.com/Algunenano)).
* Try update contrib/zlib-ng [#27327](https://github.com/ClickHouse/ClickHouse/pull/27327) ([Ilya Yatsishin](https://github.com/qoega)).
- * Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add and check system.mutations for database filter [#27384](https://github.com/ClickHouse/ClickHouse/pull/27384) ([Azat Khuzhin](https://github.com/azat)).
* Correct the key data type used in mapContains [#27423](https://github.com/ClickHouse/ClickHouse/pull/27423) ([Fuwang Hu](https://github.com/fuwhu)).
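A minimal sketch of the function in question (the map literal below is hypothetical): `mapContains` checks a key against a `Map` value, and the key argument's data type has to line up with the map's key type.

```sql
-- The key argument (String) matches the map's key type; returns 1.
SELECT mapContains(map('a', 1, 'b', 2), 'a');
```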
* Fix tests for WithMergeableStateAfterAggregationAndLimit [#27424](https://github.com/ClickHouse/ClickHouse/pull/27424) ([Azat Khuzhin](https://github.com/azat)).
@ -398,7 +398,7 @@ sidebar_label: 2022
* test for [#24410](https://github.com/ClickHouse/ClickHouse/issues/24410) [#33265](https://github.com/ClickHouse/ClickHouse/pull/33265) ([Denny Crane](https://github.com/den-crane)).
* Wait for RabbitMQ container to actually start when it was restarted in test on purpose [#33266](https://github.com/ClickHouse/ClickHouse/pull/33266) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Mark max_alter_threads as obsolete [#33268](https://github.com/ClickHouse/ClickHouse/pull/33268) ([Denny Crane](https://github.com/den-crane)).
- * Fix azure tests flackyness because of azure server closing connection [#33269](https://github.com/ClickHouse/ClickHouse/pull/33269) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Fix azure tests flakyness because of azure server closing connection [#33269](https://github.com/ClickHouse/ClickHouse/pull/33269) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Test for [#26920](https://github.com/ClickHouse/ClickHouse/issues/26920) [#33272](https://github.com/ClickHouse/ClickHouse/pull/33272) ([Denny Crane](https://github.com/den-crane)).
* Fix test_storage_kafka failures by adjusting retention.ms [#33278](https://github.com/ClickHouse/ClickHouse/pull/33278) ([Azat Khuzhin](https://github.com/azat)).
* Disable FunctionConvertFromString::canBeExecutedOnDefaultArguments [#33286](https://github.com/ClickHouse/ClickHouse/pull/33286) ([Vladimir C](https://github.com/vdimir)).
@ -447,7 +447,7 @@ sidebar_label: 2022
* Update mongodb.md [#33585](https://github.com/ClickHouse/ClickHouse/pull/33585) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Restore existing static builds links [#33597](https://github.com/ClickHouse/ClickHouse/pull/33597) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix pylint for run_check.py [#33600](https://github.com/ClickHouse/ClickHouse/pull/33600) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
- * Fix flacky test_dictionaries_postgresql/ [#33601](https://github.com/ClickHouse/ClickHouse/pull/33601) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Fix flaky test_dictionaries_postgresql/ [#33601](https://github.com/ClickHouse/ClickHouse/pull/33601) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Make ZooKeeper client better interpret keeper server connection reject [#33602](https://github.com/ClickHouse/ClickHouse/pull/33602) ([alesapin](https://github.com/alesapin)).
* Fix broken workflow dependencies [#33608](https://github.com/ClickHouse/ClickHouse/pull/33608) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Force rebuild images in CI [#33609](https://github.com/ClickHouse/ClickHouse/pull/33609) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@ -410,7 +410,7 @@ sidebar_label: 2022
* Fix mongodb test with new cert [#36161](https://github.com/ClickHouse/ClickHouse/pull/36161) ([alesapin](https://github.com/alesapin)).
* Some fixes for ReplicatedMergeTree [#36163](https://github.com/ClickHouse/ClickHouse/pull/36163) ([Alexander Tokmakov](https://github.com/tavplubix)).
* clickhouse-client: properly cancel query in case of error during formatting data [#36164](https://github.com/ClickHouse/ClickHouse/pull/36164) ([Azat Khuzhin](https://github.com/azat)).
- * Fix flacky test 01161_all_system_tables under s3 storage [#36175](https://github.com/ClickHouse/ClickHouse/pull/36175) ([Kseniia Sumarokova](https://github.com/kssenii)).
+ * Fix flaky test 01161_all_system_tables under s3 storage [#36175](https://github.com/ClickHouse/ClickHouse/pull/36175) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Revert "Fix possible mutation stuck due to race with DROP_RANGE" [#36190](https://github.com/ClickHouse/ClickHouse/pull/36190) ([Azat Khuzhin](https://github.com/azat)).
* Use atomic instead of mutex + condvar in ParallelReadBuffer [#36192](https://github.com/ClickHouse/ClickHouse/pull/36192) ([Kruglov Pavel](https://github.com/Avogar)).
* Follow-up to [#36138](https://github.com/ClickHouse/ClickHouse/issues/36138) [#36194](https://github.com/ClickHouse/ClickHouse/pull/36194) ([Alexander Tokmakov](https://github.com/tavplubix)).
@ -321,7 +321,7 @@ sidebar_label: 2023
* Add a test for [#38128](https://github.com/ClickHouse/ClickHouse/issues/38128) [#48817](https://github.com/ClickHouse/ClickHouse/pull/48817) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove excessive logging [#48826](https://github.com/ClickHouse/ClickHouse/pull/48826) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* remove duplicate indentwith in clang-format [#48834](https://github.com/ClickHouse/ClickHouse/pull/48834) ([cluster](https://github.com/infdahai)).
- * Try fix flacky test_concurrent_alter_move_and_drop [#48843](https://github.com/ClickHouse/ClickHouse/pull/48843) ([Sergei Trifonov](https://github.com/serxa)).
+ * Try fix flaky test_concurrent_alter_move_and_drop [#48843](https://github.com/ClickHouse/ClickHouse/pull/48843) ([Sergei Trifonov](https://github.com/serxa)).
* fix the race wait loading parts [#48844](https://github.com/ClickHouse/ClickHouse/pull/48844) ([Sema Checherinda](https://github.com/CheSema)).
* suppress assert of progress for test_system_replicated_fetches [#48856](https://github.com/ClickHouse/ClickHouse/pull/48856) ([Han Fei](https://github.com/hanfei1991)).
* Fix: do not run test_store_cleanup_disk_s3 in parallel [#48863](https://github.com/ClickHouse/ClickHouse/pull/48863) ([Igor Nikonov](https://github.com/devcrafter)).
@ -372,4 +372,3 @@ sidebar_label: 2023
* suppress two timeout tests [#49175](https://github.com/ClickHouse/ClickHouse/pull/49175) ([Han Fei](https://github.com/hanfei1991)).
* Document makeDateTime() and its variants [#49183](https://github.com/ClickHouse/ClickHouse/pull/49183) ([Robert Schulze](https://github.com/rschu1ze)).
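For reference, a hedged sketch of the documented functions (argument lists as the editor understands them; the values are arbitrary):

```sql
SELECT
    makeDateTime(2022, 4, 30, 12, 30, 0)         AS dt,    -- DateTime from components
    makeDateTime64(2022, 4, 30, 12, 30, 0, 123)  AS dt64;  -- DateTime64 with a fraction
```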
* Fix after [#49110](https://github.com/ClickHouse/ClickHouse/issues/49110) [#49206](https://github.com/ClickHouse/ClickHouse/pull/49206) ([Kseniia Sumarokova](https://github.com/kssenii)).
@ -263,7 +263,7 @@ sidebar_label: 2023
* Fix broken labeling for `manual approve` [#51405](https://github.com/ClickHouse/ClickHouse/pull/51405) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix parts lifetime in `MergeTreeTransaction` [#51407](https://github.com/ClickHouse/ClickHouse/pull/51407) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test test_skip_empty_files [#51409](https://github.com/ClickHouse/ClickHouse/pull/51409) ([Kruglov Pavel](https://github.com/Avogar)).
- * fix flacky test test_profile_events_s3 [#51412](https://github.com/ClickHouse/ClickHouse/pull/51412) ([Sema Checherinda](https://github.com/CheSema)).
+ * fix flaky test test_profile_events_s3 [#51412](https://github.com/ClickHouse/ClickHouse/pull/51412) ([Sema Checherinda](https://github.com/CheSema)).
* Update README.md [#51413](https://github.com/ClickHouse/ClickHouse/pull/51413) ([Tyler Hannan](https://github.com/tylerhannan)).
* Replace try/catch logic in hasTokenOrNull() by something more lightweight [#51425](https://github.com/ClickHouse/ClickHouse/pull/51425) ([Robert Schulze](https://github.com/rschu1ze)).
* Add retries to `tlsv1_3` tests [#51434](https://github.com/ClickHouse/ClickHouse/pull/51434) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
35
docs/changelogs/v23.8.16.40-lts.md
Normal file
@ -0,0 +1,35 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v23.8.16.40-lts (e143a9039ba) FIXME as compared to v23.8.15.35-lts (060ff8e813a)
#### Improvement
* Backported in [#66962](https://github.com/ClickHouse/ClickHouse/issues/66962): Added support for parameterized views with the analyzer: the view's query is no longer analyzed at CREATE time. Refactored the existing parameterized view logic accordingly. [#54211](https://github.com/ClickHouse/ClickHouse/pull/54211) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
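A minimal sketch of the feature involved (the table and parameter names are made up for illustration): the view body references query parameters, and with this change it is only analyzed when the view is queried, not when it is created.

```sql
-- hits is a hypothetical source table.
CREATE VIEW hits_by_user AS
SELECT count() AS c
FROM hits
WHERE user_id = {uid:UInt64};

-- The view's query is analyzed here, with the parameter bound.
SELECT * FROM hits_by_user(uid = 42);
```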
* Backported in [#65461](https://github.com/ClickHouse/ClickHouse/issues/65461): Reload certificate chain during certificate reload. [#61671](https://github.com/ClickHouse/ClickHouse/pull/61671) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
* Backported in [#65880](https://github.com/ClickHouse/ClickHouse/issues/65880): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#65912](https://github.com/ClickHouse/ClickHouse/issues/65912): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#65281](https://github.com/ClickHouse/ClickHouse/issues/65281): Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#65368](https://github.com/ClickHouse/ClickHouse/issues/65368): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#65743](https://github.com/ClickHouse/ClickHouse/issues/65743): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65351](https://github.com/ClickHouse/ClickHouse/issues/65351): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66037](https://github.com/ClickHouse/ClickHouse/issues/66037): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
* Backported in [#65782](https://github.com/ClickHouse/ClickHouse/issues/65782): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#65926](https://github.com/ClickHouse/ClickHouse/issues/65926): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#65822](https://github.com/ClickHouse/ClickHouse/issues/65822): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#66449](https://github.com/ClickHouse/ClickHouse/issues/66449): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66717](https://github.com/ClickHouse/ClickHouse/issues/66717): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#65080](https://github.com/ClickHouse/ClickHouse/issues/65080): Follow up to [#56541](https://github.com/ClickHouse/ClickHouse/issues/56541). [#57141](https://github.com/ClickHouse/ClickHouse/pull/57141) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#65913](https://github.com/ClickHouse/ClickHouse/issues/65913): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66853](https://github.com/ClickHouse/ClickHouse/issues/66853): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
39
docs/changelogs/v24.3.6.48-lts.md
Normal file
@ -0,0 +1,39 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.6.48-lts (b2d33c3c45d) FIXME as compared to v24.3.5.46-lts (fe54cead6b6)
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#66889](https://github.com/ClickHouse/ClickHouse/issues/66889): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66687](https://github.com/ClickHouse/ClickHouse/issues/66687): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
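For context, a hedged example of the clause involved (the user name and date are made up); after the fix, the expiration set below should survive a server restart:

```sql
CREATE USER analyst IDENTIFIED WITH sha256_password BY 'secret'
VALID UNTIL '2025-01-01';
```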
* Backported in [#67497](https://github.com/ClickHouse/ClickHouse/issues/67497): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#66324](https://github.com/ClickHouse/ClickHouse/issues/66324): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
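A small sketch of the scenario (the file name is hypothetical): the cached schema must now also key on these settings, so changing one of them re-infers the schema instead of reusing a stale cache entry.

```sql
SET input_format_csv_skip_first_lines = 1;
-- With the fix, the inferred schema is cached together with the
-- setting above, so flipping it later cannot return a stale schema.
DESCRIBE file('data.csv');
```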
* Backported in [#66151](https://github.com/ClickHouse/ClickHouse/issues/66151): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66451](https://github.com/ClickHouse/ClickHouse/issues/66451): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66222](https://github.com/ClickHouse/ClickHouse/issues/66222): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66676](https://github.com/ClickHouse/ClickHouse/issues/66676): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66602](https://github.com/ClickHouse/ClickHouse/issues/66602): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66356](https://github.com/ClickHouse/ClickHouse/issues/66356): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66970](https://github.com/ClickHouse/ClickHouse/issues/66970): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66967](https://github.com/ClickHouse/ClickHouse/issues/66967): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66718](https://github.com/ClickHouse/ClickHouse/issues/66718): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66949](https://github.com/ClickHouse/ClickHouse/issues/66949): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66946](https://github.com/ClickHouse/ClickHouse/issues/66946): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67629](https://github.com/ClickHouse/ClickHouse/issues/67629): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67193](https://github.com/ClickHouse/ClickHouse/issues/67193): TRUNCATE DATABASE used to stop replication as if it were a DROP DATABASE query; this is now fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67375](https://github.com/ClickHouse/ClickHouse/issues/67375): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67572](https://github.com/ClickHouse/ClickHouse/issues/67572): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#66422](https://github.com/ClickHouse/ClickHouse/issues/66422): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66855](https://github.com/ClickHouse/ClickHouse/issues/66855): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#67055](https://github.com/ClickHouse/ClickHouse/issues/67055): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66943](https://github.com/ClickHouse/ClickHouse/issues/66943): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
73
docs/changelogs/v24.4.4.113-stable.md
Normal file
@ -0,0 +1,73 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.4.4.113-stable (d63a54957bd) FIXME as compared to v24.4.3.25-stable (a915dd4eda4)
#### Improvement
* Backported in [#65884](https://github.com/ClickHouse/ClickHouse/issues/65884): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#65303](https://github.com/ClickHouse/ClickHouse/issues/65303): Restored the previous behaviour of how ClickHouse interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes that behaviour available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
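A sketch of opting back into the reverted behaviour via the settings named in the entry:

```sql
SET output_format_csv_serialize_tuple_into_separate_columns = 1;
SET input_format_csv_deserialize_separate_columns_into_tuple = 1;
SET input_format_csv_try_infer_strings_from_quoted_tuples = 1;
```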
* Backported in [#65894](https://github.com/ClickHouse/ClickHouse/issues/65894): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#65372](https://github.com/ClickHouse/ClickHouse/issues/65372): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#66883](https://github.com/ClickHouse/ClickHouse/issues/66883): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#65435](https://github.com/ClickHouse/ClickHouse/issues/65435): Forbid `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)).
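For context, a hedged example of the clause being forbidden (the query is illustrative): `QUALIFY` filters on window-function results, which the old analyzer silently ignored; in a mutation that silent ignore could delete more rows than intended, hence the hard error.

```sql
-- Valid only with the new analyzer; the old analyzer now rejects it
-- instead of silently dropping the QUALIFY filter.
SELECT number, row_number() OVER (ORDER BY number) AS rn
FROM numbers(10)
QUALIFY rn <= 3;
```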
* Backported in [#65448](https://github.com/ClickHouse/ClickHouse/issues/65448): Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#65710](https://github.com/ClickHouse/ClickHouse/issues/65710): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66689](https://github.com/ClickHouse/ClickHouse/issues/66689): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#67499](https://github.com/ClickHouse/ClickHouse/issues/67499): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65353](https://github.com/ClickHouse/ClickHouse/issues/65353): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#65060](https://github.com/ClickHouse/ClickHouse/issues/65060): Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN.`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65329](https://github.com/ClickHouse/ClickHouse/issues/65329): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
* Backported in [#64833](https://github.com/ClickHouse/ClickHouse/issues/64833): Fix bug which could lead to non-working TTLs with expressions. [#64694](https://github.com/ClickHouse/ClickHouse/pull/64694) ([alesapin](https://github.com/alesapin)).
* Backported in [#65086](https://github.com/ClickHouse/ClickHouse/issues/65086): Fix removing the `WHERE` and `PREWHERE` expressions, which are always true (for the new analyzer). [#64695](https://github.com/ClickHouse/ClickHouse/pull/64695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65540](https://github.com/ClickHouse/ClickHouse/issues/65540): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
* Backported in [#65578](https://github.com/ClickHouse/ClickHouse/issues/65578): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
* Backported in [#65161](https://github.com/ClickHouse/ClickHouse/issues/65161): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#65616](https://github.com/ClickHouse/ClickHouse/issues/65616): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#65730](https://github.com/ClickHouse/ClickHouse/issues/65730): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#65668](https://github.com/ClickHouse/ClickHouse/issues/65668): Disable `non-intersecting-parts` optimization for queries with `FINAL` when the `read-in-order` optimization is enabled, since the combination could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
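The workaround from the entry, spelled out as settings (a sketch; only relevant on affected versions before the fix):

```sql
SET do_not_merge_across_partitions_select_final = 0;
SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0;
```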
* Backported in [#65786](https://github.com/ClickHouse/ClickHouse/issues/65786): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#65810](https://github.com/ClickHouse/ClickHouse/issues/65810): Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#65931](https://github.com/ClickHouse/ClickHouse/issues/65931): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#65826](https://github.com/ClickHouse/ClickHouse/issues/65826): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#66299](https://github.com/ClickHouse/ClickHouse/issues/66299): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`); fix incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
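A sketch of the rewrite described in the entry (the table and column names are hypothetical):

```sql
-- A hand-written null-safe join condition...
SELECT * FROM t1 JOIN t2
ON (t1.a = t2.a AND t1.a IS NOT NULL AND t2.a IS NOT NULL)
   OR (t1.a IS NULL AND t2.a IS NULL);

-- ...is rewritten by the optimizer to the null-safe comparison:
SELECT * FROM t1 JOIN t2 ON t1.a <=> t2.a;
```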
* Backported in [#66326](https://github.com/ClickHouse/ClickHouse/issues/66326): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66153](https://github.com/ClickHouse/ClickHouse/issues/66153): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66459](https://github.com/ClickHouse/ClickHouse/issues/66459): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66224](https://github.com/ClickHouse/ClickHouse/issues/66224): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66267](https://github.com/ClickHouse/ClickHouse/issues/66267): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66678](https://github.com/ClickHouse/ClickHouse/issues/66678): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66603](https://github.com/ClickHouse/ClickHouse/issues/66603): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66358](https://github.com/ClickHouse/ClickHouse/issues/66358): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66971](https://github.com/ClickHouse/ClickHouse/issues/66971): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66968](https://github.com/ClickHouse/ClickHouse/issues/66968): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66719](https://github.com/ClickHouse/ClickHouse/issues/66719): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66950](https://github.com/ClickHouse/ClickHouse/issues/66950): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66947](https://github.com/ClickHouse/ClickHouse/issues/66947): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67631](https://github.com/ClickHouse/ClickHouse/issues/67631): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67195](https://github.com/ClickHouse/ClickHouse/issues/67195): TRUNCATE DATABASE used to stop replication as if it were a DROP DATABASE query; this is now fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67377](https://github.com/ClickHouse/ClickHouse/issues/67377): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67240](https://github.com/ClickHouse/ClickHouse/issues/67240): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#67574](https://github.com/ClickHouse/ClickHouse/issues/67574): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#65410](https://github.com/ClickHouse/ClickHouse/issues/65410): Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#65903](https://github.com/ClickHouse/ClickHouse/issues/65903): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66385](https://github.com/ClickHouse/ClickHouse/issues/66385): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66424](https://github.com/ClickHouse/ClickHouse/issues/66424): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66542](https://github.com/ClickHouse/ClickHouse/issues/66542): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66857](https://github.com/ClickHouse/ClickHouse/issues/66857): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66873](https://github.com/ClickHouse/ClickHouse/issues/66873): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67057](https://github.com/ClickHouse/ClickHouse/issues/67057): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66944](https://github.com/ClickHouse/ClickHouse/issues/66944): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67250](https://github.com/ClickHouse/ClickHouse/issues/67250): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67410](https://github.com/ClickHouse/ClickHouse/issues/67410): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
55
docs/changelogs/v24.5.5.78-stable.md
Normal file
@ -0,0 +1,55 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.5.5.78-stable (0138248cb62) FIXME as compared to v24.5.4.49-stable (63b760955a0)
#### Improvement
* Backported in [#66768](https://github.com/ClickHouse/ClickHouse/issues/66768): Make `allow_experimental_analyzer` controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#66884](https://github.com/ClickHouse/ClickHouse/issues/66884): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66691](https://github.com/ClickHouse/ClickHouse/issues/66691): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#67814](https://github.com/ClickHouse/ClickHouse/issues/67814): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67501](https://github.com/ClickHouse/ClickHouse/issues/67501): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67850](https://github.com/ClickHouse/ClickHouse/issues/67850): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65350](https://github.com/ClickHouse/ClickHouse/issues/65350): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#65621](https://github.com/ClickHouse/ClickHouse/issues/65621): Fix `Cannot find column` in distributed query with `ARRAY JOIN` by `Nested` column. Fixes [#64755](https://github.com/ClickHouse/ClickHouse/issues/64755). [#64801](https://github.com/ClickHouse/ClickHouse/pull/64801) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65933](https://github.com/ClickHouse/ClickHouse/issues/65933): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Backported in [#66301](https://github.com/ClickHouse/ClickHouse/issues/66301): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
|
||||
* Backported in [#66328](https://github.com/ClickHouse/ClickHouse/issues/66328): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#66155](https://github.com/ClickHouse/ClickHouse/issues/66155): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Backported in [#66454](https://github.com/ClickHouse/ClickHouse/issues/66454): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Backported in [#66226](https://github.com/ClickHouse/ClickHouse/issues/66226): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#66680](https://github.com/ClickHouse/ClickHouse/issues/66680): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||
* Backported in [#66604](https://github.com/ClickHouse/ClickHouse/issues/66604): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
|
||||
* Backported in [#66360](https://github.com/ClickHouse/ClickHouse/issues/66360): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66972](https://github.com/ClickHouse/ClickHouse/issues/66972): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66969](https://github.com/ClickHouse/ClickHouse/issues/66969): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66720](https://github.com/ClickHouse/ClickHouse/issues/66720): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Backported in [#66951](https://github.com/ClickHouse/ClickHouse/issues/66951): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66757](https://github.com/ClickHouse/ClickHouse/issues/66757): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL).` The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66948](https://github.com/ClickHouse/ClickHouse/issues/66948): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#67633](https://github.com/ClickHouse/ClickHouse/issues/67633): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Backported in [#67481](https://github.com/ClickHouse/ClickHouse/issues/67481): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
|
||||
* Backported in [#67197](https://github.com/ClickHouse/ClickHouse/issues/67197): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Backported in [#67379](https://github.com/ClickHouse/ClickHouse/issues/67379): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#67576](https://github.com/ClickHouse/ClickHouse/issues/67576): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Backported in [#66387](https://github.com/ClickHouse/ClickHouse/issues/66387): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
|
||||
* Backported in [#66426](https://github.com/ClickHouse/ClickHouse/issues/66426): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66544](https://github.com/ClickHouse/ClickHouse/issues/66544): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#66859](https://github.com/ClickHouse/ClickHouse/issues/66859): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
|
||||
* Backported in [#66875](https://github.com/ClickHouse/ClickHouse/issues/66875): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
|
||||
* Backported in [#67059](https://github.com/ClickHouse/ClickHouse/issues/67059): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
|
||||
* Backported in [#66945](https://github.com/ClickHouse/ClickHouse/issues/66945): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Backported in [#67252](https://github.com/ClickHouse/ClickHouse/issues/67252): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
|
||||
* Backported in [#67412](https://github.com/ClickHouse/ClickHouse/issues/67412): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
|
||||
|
docs/changelogs/v24.6.3.95-stable.md
Normal file
@ -0,0 +1,67 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.3.95-stable (8325c920d11) FIXME as compared to v24.6.2.17-stable (5710a8b5c0c)

#### Improvement
* Backported in [#66770](https://github.com/ClickHouse/ClickHouse/issues/66770): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).

#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#66885](https://github.com/ClickHouse/ClickHouse/issues/66885): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66693](https://github.com/ClickHouse/ClickHouse/issues/66693): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#67816](https://github.com/ClickHouse/ClickHouse/issues/67816): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67503](https://github.com/ClickHouse/ClickHouse/issues/67503): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67852](https://github.com/ClickHouse/ClickHouse/issues/67852): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#67838](https://github.com/ClickHouse/ClickHouse/issues/67838): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#66303](https://github.com/ClickHouse/ClickHouse/issues/66303): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Backported in [#66330](https://github.com/ClickHouse/ClickHouse/issues/66330): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66157](https://github.com/ClickHouse/ClickHouse/issues/66157): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66210](https://github.com/ClickHouse/ClickHouse/issues/66210): Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if the optimization merges two filter expressions and does not apply short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66456](https://github.com/ClickHouse/ClickHouse/issues/66456): Fixed a bug in ZooKeeper client: a session could get stuck in an unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66228](https://github.com/ClickHouse/ClickHouse/issues/66228): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66183](https://github.com/ClickHouse/ClickHouse/issues/66183): Fix a rare case of missing data in the result of a distributed query, close [#61432](https://github.com/ClickHouse/ClickHouse/issues/61432). [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
* Backported in [#66271](https://github.com/ClickHouse/ClickHouse/issues/66271): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66682](https://github.com/ClickHouse/ClickHouse/issues/66682): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66587](https://github.com/ClickHouse/ClickHouse/issues/66587): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66362](https://github.com/ClickHouse/ClickHouse/issues/66362): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66613](https://github.com/ClickHouse/ClickHouse/issues/66613): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66577](https://github.com/ClickHouse/ClickHouse/issues/66577): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66721](https://github.com/ClickHouse/ClickHouse/issues/66721): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66670](https://github.com/ClickHouse/ClickHouse/issues/66670): Fix reading of uninitialized memory when hashing empty tuples. This closes [#66559](https://github.com/ClickHouse/ClickHouse/issues/66559). [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#66952](https://github.com/ClickHouse/ClickHouse/issues/66952): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66956](https://github.com/ClickHouse/ClickHouse/issues/66956): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66716](https://github.com/ClickHouse/ClickHouse/issues/66716): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66759](https://github.com/ClickHouse/ClickHouse/issues/66759): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66751](https://github.com/ClickHouse/ClickHouse/issues/66751): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67635](https://github.com/ClickHouse/ClickHouse/issues/67635): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67482](https://github.com/ClickHouse/ClickHouse/issues/67482): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#67199](https://github.com/ClickHouse/ClickHouse/issues/67199): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67381](https://github.com/ClickHouse/ClickHouse/issues/67381): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67244](https://github.com/ClickHouse/ClickHouse/issues/67244): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#67578](https://github.com/ClickHouse/ClickHouse/issues/67578): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67808](https://github.com/ClickHouse/ClickHouse/issues/67808): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Backport [#66599](https://github.com/ClickHouse/ClickHouse/issues/66599) to 24.6: Fix dropping named collection in local storage"'. [#66922](https://github.com/ClickHouse/ClickHouse/pull/66922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#66332](https://github.com/ClickHouse/ClickHouse/issues/66332): Do not raise a NOT_IMPLEMENTED error when getting s3 metrics with a multiple disk configuration. [#65403](https://github.com/ClickHouse/ClickHouse/pull/65403) ([Elena Torró](https://github.com/elenatorro)).
* Backported in [#66142](https://github.com/ClickHouse/ClickHouse/issues/66142): Fix flaky test_storage_s3_queue tests. [#66009](https://github.com/ClickHouse/ClickHouse/pull/66009) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#66389](https://github.com/ClickHouse/ClickHouse/issues/66389): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66428](https://github.com/ClickHouse/ClickHouse/issues/66428): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66546](https://github.com/ClickHouse/ClickHouse/issues/66546): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66861](https://github.com/ClickHouse/ClickHouse/issues/66861): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66877](https://github.com/ClickHouse/ClickHouse/issues/66877): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67061](https://github.com/ClickHouse/ClickHouse/issues/67061): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66940](https://github.com/ClickHouse/ClickHouse/issues/66940): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67254](https://github.com/ClickHouse/ClickHouse/issues/67254): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67414](https://github.com/ClickHouse/ClickHouse/issues/67414): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
@ -118,7 +118,7 @@ And the result of interpreting the `INSERT SELECT` query is a "completed" `Query
|
||||
|
||||
`InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow for modular transformations of the query.
|
||||
|
||||
To address current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not production-ready yet, but it can be tested with the `allow_experimental_analyzer` flag.
|
||||
To address problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` has been developed. This is a new version of the `InterpreterSelectQuery`, which does not use the `ExpressionAnalyzer` and introduces an additional layer of abstraction between `AST` and `QueryPipeline`, called `QueryTree'. It is fully ready for use in production, but just in case it can be turned off by setting the value of the `enable_analyzer` setting to `false`.
|
||||
|
||||
## Functions {#functions}
|
||||
|
||||
|
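For a quick orientation, a minimal sketch of toggling the analyzer per query; the setting name `enable_analyzer` is taken from the paragraph above (older builds expose it as `allow_experimental_analyzer`):

```sql
-- The same query executed by the old interpreter infrastructure and by the new analyzer.
SELECT 1 + 1 AS x, x + 1 SETTINGS enable_analyzer = 0;
SELECT 1 + 1 AS x, x + 1 SETTINGS enable_analyzer = 1;
```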
@ -123,7 +123,7 @@ To ensure consistent and expected results, especially when migrating old queries

In the new version of the analyzer, the rules for determining the common supertype for columns specified in the `USING` clause have been standardized to produce more predictable outcomes, especially when dealing with type modifiers like `LowCardinality` and `Nullable`.

- `LowCardinality(T)` and `T`: When a column of type `LowCardinality(T)` is joined with a column of type `T`, the resulting common supertype will be `T`, effectively discarding the `LowCardinality` modifier.

- `Nullable(T)` and `T`: When a column of type `Nullable(T)` is joined with a column of type `T`, the resulting common supertype will be `Nullable(T)`, ensuring that the nullable property is preserved.
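A hedged sketch of the two rules, using hypothetical inline subqueries and assuming the new analyzer is enabled:

```sql
-- LowCardinality(String) joined with String: the supertype is String.
SELECT toTypeName(id)
FROM (SELECT toLowCardinality('x') AS id) AS t1
JOIN (SELECT 'x' AS id) AS t2 USING (id);

-- Nullable(String) joined with String: the supertype is Nullable(String).
SELECT toTypeName(id)
FROM (SELECT toNullable('x') AS id) AS t1
JOIN (SELECT 'x' AS id) AS t2 USING (id);
```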
**Example:**
@ -144,7 +144,7 @@ During projection names computation, aliases are not substituted.
SELECT
    1 + 1 AS x,
    x + 1
SETTINGS allow_experimental_analyzer = 0
SETTINGS enable_analyzer = 0
FORMAT PrettyCompact

┌─x─┬─plus(plus(1, 1), 1)─┐
@ -154,7 +154,7 @@ FORMAT PrettyCompact
SELECT
    1 + 1 AS x,
    x + 1
SETTINGS allow_experimental_analyzer = 1
SETTINGS enable_analyzer = 1
FORMAT PrettyCompact

┌─x─┬─plus(x, 1)─┐
@ -177,7 +177,7 @@ SELECT toTypeName(if(0, [2, 3, 4], 'String'))

### Heterogeneous clusters

The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `allow_experimental_analyzer` setting values.
The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `enable_analyzer` setting values.

### Mutations are interpreted by previous analyzer
@ -85,6 +85,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
- `password` for the file on disk
- `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')` (see the sketch after this list)
- `use_same_s3_credentials_for_base_backup`: whether the base backup to S3 should inherit credentials from the query. Only works with `S3`.
- `use_same_password_for_base_backup`: whether the base backup archive should inherit the password from the query.
- `structure_only`: if enabled, allows backing up or restoring only the CREATE statements, without the data of tables
- `storage_policy`: storage policy for the tables being restored. See [Using Multiple Block Devices for Data Storage](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes). This setting is only applicable to the `RESTORE` command. The specified storage policy applies only to tables with an engine from the `MergeTree` family.
- `s3_storage_class`: the storage class used for S3 backup. For example, `STANDARD`
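A hedged sketch combining several of these settings (hypothetical disk, table, and archive names):

```sql
BACKUP TABLE test.table TO Disk('backups', 'incremental.zip')
SETTINGS base_backup = Disk('backups', 'base.zip'), structure_only = 1;
```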
@ -119,11 +119,6 @@ Minimum size of blocks of uncompressed data required for compression when writin
You can also specify this setting in the global settings (see [min_compress_block_size](/docs/en/operations/settings/settings.md/#min-compress-block-size) setting).
The value specified when the table is created overrides the global value for this setting.

## max_partitions_to_read

Limits the maximum number of partitions that can be accessed in one query.
You can also specify the [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) setting globally.
## max_suspicious_broken_parts

If the number of broken parts in a single partition exceeds the `max_suspicious_broken_parts` value, automatic deletion is denied.

@ -691,6 +686,8 @@ Possible values:

Default value: -1 (unlimited).

You can also specify a query complexity setting [max_partitions_to_read](query-complexity#max-partitions-to-read) at a query / session / profile level.
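A minimal sketch of setting the table-level limit at creation time (hypothetical table name and schema):

```sql
CREATE TABLE events
(
    d Date,
    x UInt64
)
ENGINE = MergeTree
PARTITION BY d
ORDER BY x
SETTINGS max_partitions_to_read = 10;
```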
## min_age_to_force_merge_seconds {#min_age_to_force_merge_seconds}

Merge parts if every part in the range is older than the value of `min_age_to_force_merge_seconds`.
@ -188,7 +188,7 @@ If you set `timeout_before_checking_execution_speed` to 0, ClickHouse will use c

What to do if the query runs longer than `max_execution_time` or the estimated running time is longer than `max_estimated_execution_time`: `throw` or `break`. By default, `throw`.

# max_execution_time_leaf
## max_execution_time_leaf

Similar in semantics to `max_execution_time`, but only applied on leaf nodes for distributed or remote queries.
@ -204,7 +204,7 @@ We can use `max_execution_time_leaf` as the query settings:
SELECT count() FROM cluster(cluster, view(SELECT * FROM t)) SETTINGS max_execution_time_leaf = 10;
```

# timeout_overflow_mode_leaf
## timeout_overflow_mode_leaf

What to do when the query in a leaf node runs longer than `max_execution_time_leaf`: `throw` or `break`. By default, `throw`.
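Combining this with the example above, a minimal sketch:

```sql
SELECT count() FROM cluster(cluster, view(SELECT * FROM t))
SETTINGS max_execution_time_leaf = 10, timeout_overflow_mode_leaf = 'break';
```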
@ -426,3 +426,17 @@ Example:
```

Default value: 0 (Infinite count of simultaneous sessions).

## max_partitions_to_read {#max-partitions-to-read}

Limits the maximum number of partitions that can be accessed in one query.

The setting value specified when the table is created can be overridden via a query-level setting.

Possible values:

- Any positive integer.

Default value: -1 (unlimited).

You can also specify the MergeTree setting [max_partitions_to_read](merge-tree-settings#max-partitions-to-read) in a table's settings.
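For example, a sketch of the query-level limit (hypothetical table `t` partitioned by a date column `d`):

```sql
SELECT count() FROM t WHERE d >= '2024-01-01'
SETTINGS max_partitions_to_read = 2;
```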
@ -4051,7 +4051,7 @@ Rewrite aggregate functions with if expression as argument when logically equiva
For example, `avg(if(cond, col, null))` can be rewritten to `avgOrNullIf(cond, col)`. It may improve performance.

:::note
Supported only with experimental analyzer (`allow_experimental_analyzer = 1`).
Supported only with experimental analyzer (`enable_analyzer = 1`).
:::
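A sketch of the rewrite; the controlling setting is assumed to be `optimize_rewrite_aggregate_function_with_if`, which is not named in this excerpt:

```sql
SET enable_analyzer = 1;
SET optimize_rewrite_aggregate_function_with_if = 1; -- assumed setting name

-- Logically equivalent to avgOrNullIf(number % 2 = 0, number);
-- the optimizer may apply that rewrite internally.
SELECT avg(if(number % 2 = 0, number, NULL)) FROM numbers(10);
```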
## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}
@ -0,0 +1,90 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupconcat
sidebar_position: 363
sidebar_label: groupConcat
title: groupConcat
---

Calculates a concatenated string from a group of strings, optionally separated by a delimiter, and optionally limited by a maximum number of elements.

**Syntax**

``` sql
groupConcat(expression [, delimiter] [, limit]);
```

**Arguments**

- `expression` — The expression or column name that outputs strings to be concatenated.
- `delimiter` — A [string](../../../sql-reference/data-types/string.md) that will be used to separate concatenated values. This parameter is optional and defaults to an empty string if not specified.
- `limit` — A positive [integer](../../../sql-reference/data-types/int-uint.md) specifying the maximum number of elements to concatenate. If more elements are present, excess elements are ignored. This parameter is optional.

:::note
If delimiter is specified without limit, it must be the first parameter following the expression. If both delimiter and limit are specified, delimiter must precede limit.
:::

**Returned value**

- Returns a [string](../../../sql-reference/data-types/string.md) consisting of the concatenated values of the column or expression. If the group has no elements, or only null elements, and the function does not specify a handling for only null values, the result is a nullable string with a null value.

**Examples**

Input table:

``` text
┌─id─┬─name─┐
│  1 │ John │
│  2 │ Jane │
│  3 │ Bob  │
└────┴──────┘
```

1. Basic usage without a delimiter:

Query:

``` sql
SELECT groupConcat(name) FROM Employees;
```

Result:

``` text
JohnJaneBob
```

This concatenates all names into one continuous string without any separator.

2. Using a comma as a delimiter:

Query:

``` sql
SELECT groupConcat(name, ', ') FROM Employees;
```

Result:

``` text
John, Jane, Bob
```

This output shows the names separated by a comma followed by a space.

3. Limiting the number of concatenated elements:

Query:

``` sql
SELECT groupConcat(name, ', ', 2) FROM Employees;
```

Result:

``` text
John, Jane
```

This query limits the output to the first two names, even though there are more names in the table.
@ -223,3 +223,28 @@ SELECT translateUTF8('Münchener Straße', 'üß', 'us') AS res;
│ Munchener Strase │
└──────────────────┘
```

## printf

The `printf` function formats the given string with the values (strings, integers, floating-point numbers, etc.) listed in the arguments, similar to the printf function in C++. The format string can contain format specifiers starting with the `%` character. Anything that is not part of `%` and the following format specifier is considered literal text and copied verbatim into the output. A literal `%` character can be escaped as `%%`.

**Syntax**

``` sql
printf(format, arg1, arg2, ...)
```

**Example**

Query:

``` sql
select printf('%%%s %s %d', 'Hello', 'World', 2024);
```

Result:

``` response
┌─printf('%%%s %s %d', 'Hello', 'World', 2024)─┐
│ %Hello World 2024                            │
└──────────────────────────────────────────────┘
```
@ -150,15 +150,15 @@ A case insensitive invariant of [position](#position).

Query:

``` sql
SELECT position('Hello, world!', 'hello');
SELECT positionCaseInsensitive('Hello, world!', 'hello');
```

Result:

``` text
┌─position('Hello, world!', 'hello')─┐
│                                  0 │
└────────────────────────────────────┘
┌─positionCaseInsensitive('Hello, world!', 'hello')─┐
│                                                  1 │
└───────────────────────────────────────────────────┘
```

## positionUTF8
@ -43,7 +43,7 @@ Result:

## mapFromArrays

Creates a map from an array of keys and an array of values.
Creates a map from an array or map of keys and an array or map of values.

The function is a convenient alternative to the syntax `CAST([...], 'Map(key_type, value_type)')`.
For example, instead of writing

@ -62,8 +62,8 @@ Alias: `MAP_FROM_ARRAYS(keys, values)`

**Arguments**

- `keys` — Array of keys to create the map from. [Array(T)](../data-types/array.md) where `T` can be any type supported by [Map](../data-types/map.md) as key type.
- `values` — Array or map of values to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md).
- `keys` — Array or map of keys to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md). If `keys` is an array, we accept `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as its type as long as it doesn't contain NULL values.
- `values` — Array or map of values to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md).

**Returned value**

@ -99,6 +99,18 @@ Result:
└───────────────────────────────────────────────────────┘
```

```sql
SELECT mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3])
```

Result:

```
┌─mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3])─┐
│ {('a',1):1,('b',2):2,('c',3):3}                       │
└───────────────────────────────────────────────────────┘
```
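A hedged sketch of the newly allowed `Array(Nullable(T))` key type described above (the keys must not actually contain NULL values):

```sql
SELECT mapFromArrays(CAST(['a', 'b', 'c'], 'Array(Nullable(String))'), [1, 2, 3])
```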
## extractKeyValuePairs

Converts a string of key-value pairs to a [Map(String, String)](../data-types/map.md).
@ -12,6 +12,8 @@ The [rank](./rank.md) function provides the same behaviour, but with gaps in ran

**Syntax**

Alias: `denseRank` (case-sensitive)

```sql
dense_rank (column_name)
  OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
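A minimal sketch of the case-sensitive alias mentioned above, using synthetic data from `numbers`:

```sql
SELECT number, denseRank() OVER (ORDER BY number % 3) AS dr
FROM numbers(6);
```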
@ -23,8 +23,8 @@ ClickHouse supports the standard grammar for defining windows and window functio
| `INTERVAL` syntax for `DateTime` `RANGE OFFSET` frame | ❌ (specify the number of seconds instead (`RANGE` works with any numeric type).) |
| `GROUPS` frame | ❌ |
| Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | ✅ (All aggregate functions are supported) |
| `rank()`, `dense_rank()`, `row_number()` | ✅ |
| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)`|
| `rank()`, `dense_rank()`, `row_number()` | ✅ <br/>Alias: `denseRank()` |
| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)` <br/>Alias: `percentRank()`|
| `lag/lead(value, offset)` | ❌ <br/> You can use one of the following workarounds (a sketch follows this table):<br/> 1) `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead` <br/> 2) `lagInFrame/leadInFrame`, which are analogous, but respect the window frame. To get behavior identical to `lag/lead`, use `rows between unbounded preceding and unbounded following` |
| `ntile(buckets)` | ✅ <br/> Specify the window as `(partition by x order by y rows between unbounded preceding and unbounded following)`. |
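A sketch of workaround 1 from the `lag/lead` row above, assuming a hypothetical table with `time` and `value` columns:

```sql
SELECT
    time,
    value,
    any(value) OVER (ORDER BY time ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS lag_value
FROM t;
```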
@ -115,7 +115,7 @@ ClickHouse is a full-fledged column-oriented DBMS. Data

`InterpreterSelectQuery` uses the `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is written rather messily and should be rewritten: the various query transformations and optimizations should be extracted into separate classes to allow modular transformations of the query.

To address the current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional level of abstraction between `AST` and `QueryPipeline`, called `QueryTree`. It is not yet production-ready, but it can be tested with the `allow_experimental_analyzer` flag.
To address the problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` has been developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional level of abstraction between `AST` and `QueryPipeline`, called `QueryTree`. It is fully production-ready, but it can still be turned off by setting the `enable_analyzer` setting to `false`.

## Functions {#functions}
@ -252,7 +252,7 @@ sidebar_label: "\u53D8\u66F4\u65E5\u5FD7"
- Suppress some test failures under MSan. [#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Speed up the "exception while insert" test. This test often times out in debug builds with coverage. [#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Update `libcxx` and `libcxxabi` to master. In preparation for [#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fix flacky test `00910_zookeeper_test_alter_compression_codecs`. [#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fix flaky test `00910_zookeeper_test_alter_compression_codecs`. [#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Clean up duplicated linker flags. Make sure the linker won't look up unexpected symbols. [#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird))
- Add the `clickhouse-odbc` driver into the test images. This allows testing the interaction of ClickHouse with ClickHouse via its own ODBC driver. [#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov))
- Fix several bugs in unit tests. [#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([alesapin](https://github.com/alesapin))
@ -66,14 +66,14 @@
<server>
    <!-- Used for secure tcp port -->
    <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
    <certificateFile>/etc/clickhouse-keeper/server.crt</certificateFile>
    <privateKeyFile>/etc/clickhouse-keeper/server.key</privateKeyFile>
    <!-- <certificateFile>/etc/clickhouse-keeper/server.crt</certificateFile> -->
    <!-- <privateKeyFile>/etc/clickhouse-keeper/server.key</privateKeyFile> -->
    <!-- dhparams are optional. You can delete the <dhParamsFile> element.
         To generate dhparams, use the following command:
         openssl dhparam -out /etc/clickhouse-keeper/dhparam.pem 4096
         Only file format with BEGIN DH PARAMETERS is supported.
    -->
    <dhParamsFile>/etc/clickhouse-keeper/dhparam.pem</dhParamsFile>
    <!-- <dhParamsFile>/etc/clickhouse-keeper/dhparam.pem</dhParamsFile> -->
    <verificationMode>none</verificationMode>
    <loadDefaultCAFile>true</loadDefaultCAFile>
    <cacheSessions>true</cacheSessions>
@ -1,6 +1,7 @@
#include "LocalServer.h"

#include <sys/resource.h>
#include <Common/Config/getLocalConfigPath.h>
#include <Common/logger_useful.h>
#include <Common/formatReadable.h>
#include <Core/UUID.h>
@ -127,10 +128,21 @@ void LocalServer::initialize(Poco::Util::Application & self)
{
    Poco::Util::Application::initialize(self);

    const char * home_path_cstr = getenv("HOME"); // NOLINT(concurrency-mt-unsafe)
    if (home_path_cstr)
        home_path = home_path_cstr;

    /// Load config files if they exist
    if (getClientConfiguration().has("config-file") || fs::exists("config.xml"))
    std::string config_path;
    if (getClientConfiguration().has("config-file"))
        config_path = getClientConfiguration().getString("config-file");
    else if (config_path.empty() && fs::exists("config.xml"))
        config_path = "config.xml";
    else if (config_path.empty())
        config_path = getLocalConfigPath(home_path).value_or("");

    if (fs::exists(config_path))
    {
        const auto config_path = getClientConfiguration().getString("config-file", "config.xml");
        ConfigProcessor config_processor(config_path, false, true);
        ConfigProcessor::setConfigPath(fs::path(config_path).parent_path());
        auto loaded_config = config_processor.loadConfig();
@ -849,7 +849,7 @@ try
#endif

#if defined(SANITIZER)
    LOG_INFO(log, "Query Profiler disabled because they cannot work under sanitizers"
    LOG_INFO(log, "Query Profiler is disabled because it cannot work under sanitizers"
        " when two different stack unwinding methods will interfere with each other.");
#endif
@ -1130,8 +1130,7 @@
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_views_log>

    <!-- Uncomment if use part log.
         Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).-->
    <!-- Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads). -->
    <part_log>
        <database>system</database>
        <table>part_log</table>
@ -1143,9 +1142,9 @@
        <flush_on_crash>false</flush_on_crash>
    </part_log>

    <!-- Uncomment to write text log into table.
         Text log contains all information from usual server log but stores it in structured and efficient way.
    <!-- Text log contains all information from usual server log but stores it in structured and efficient way.
         The level of the messages that goes to the table can be limited (<level>), if not specified all messages will go to the table.
    -->
    <text_log>
        <database>system</database>
        <table>text_log</table>
@ -1154,9 +1153,8 @@
        <reserved_size_rows>8192</reserved_size_rows>
        <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
        <flush_on_crash>false</flush_on_crash>
        <level></level>
        <level>trace</level>
    </text_log>
    -->

    <!-- Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval. -->
    <metric_log>
@ -17,7 +17,7 @@
    --input-shadow-color: rgba(0, 255, 0, 1);
    --error-color: red;
    --global-error-color: white;
    --legend-background: rgba(255, 255, 255, 0.75);
    --legend-background: rgba(255, 255, 0, 0.75);
    --title-color: #666;
    --text-color: black;
    --edit-title-background: #FEE;
@ -41,7 +41,7 @@
    --moving-shadow-color: rgba(255, 255, 255, 0.25);
    --input-shadow-color: rgba(255, 128, 0, 0.25);
    --error-color: #F66;
    --legend-background: rgba(255, 255, 255, 0.25);
    --legend-background: rgba(0, 96, 128, 0.75);
    --title-color: white;
    --text-color: white;
    --edit-title-background: #364f69;
@ -218,6 +218,7 @@

#chart-params .param {
    width: 6%;
    font-family: monospace;
}

input {
@ -256,6 +257,7 @@
    font-weight: bold;
    user-select: none;
    cursor: pointer;
    margin-bottom: 1rem;
}

#run:hover {
@ -309,7 +311,7 @@
    color: var(--param-text-color);
    display: inline-block;
    box-shadow: 1px 1px 0 var(--shadow-color);
    margin-bottom: 1rem;
    margin-bottom: 0.5rem;
}

input:focus {
@ -657,6 +659,10 @@ function insertParam(name, value) {
    param_value.value = value;
    param_value.spellcheck = false;

    let setWidth = e => { e.style.width = (e.value.length + 1) + 'ch' };
    if (value) { setWidth(param_value); }
    param_value.addEventListener('input', e => setWidth(e.target));

    param_wrapper.appendChild(param_name);
    param_wrapper.appendChild(param_value);
    document.getElementById('chart-params').appendChild(param_wrapper);
@ -945,6 +951,7 @@ function showMassEditor() {
    let editor = document.getElementById('mass-editor-textarea');
    editor.value = JSON.stringify({params: params, queries: queries}, null, 2);

    editor.focus();
    mass_editor_active = true;
}
@ -1004,14 +1011,14 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend-
    className && legendEl.classList.add(className);

    uPlot.assign(legendEl.style, {
        textAlign: "left",
        textAlign: "right",
        pointerEvents: "none",
        display: "none",
        position: "absolute",
        left: 0,
        top: 0,
        zIndex: 100,
        boxShadow: "2px 2px 10px rgba(0,0,0,0.1)",
        boxShadow: "2px 2px 10px rgba(0, 0, 0, 0.1)",
        ...style
    });
@ -1051,8 +1058,10 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend-

    function update(u) {
        let { left, top } = u.cursor;
        left -= legendEl.clientWidth / 2;
        top -= legendEl.clientHeight / 2;
        /// This will put the balloon to the right of the cursor when the cursor is on the left side, and vice versa,
        /// avoiding the borders of the chart.
        left -= legendEl.clientWidth * (left / u.width);
        top -= legendEl.clientHeight;
        legendEl.style.transform = "translate(" + left + "px, " + top + "px)";

        if (multiline) {
@ -1139,7 +1148,7 @@ async function draw(idx, chart, url_params, query) {

    let {reply, error} = await doFetch(query, url_params);
    if (!error) {
        if (reply.rows.length == 0) {
        if (reply.rows == 0) {
            error = "Query returned empty result.";
        } else if (reply.meta.length < 2) {
            error = "Query should return at least two columns: unix timestamp and value.";
@ -1229,14 +1238,53 @@ async function draw(idx, chart, url_params, query) {

    let sync = uPlot.sync("sync");

    let axis = {
    function formatDateTime(t) {
        return (new Date(t * 1000)).toISOString().replace('T', '\n').replace('.000Z', '');
    }

    function formatDateTimes(self, ticks) {
        return ticks.map((t, idx) => {
            let res = formatDateTime(t);
            if (idx == 0 || res.substring(0, 10) != formatDateTime(ticks[idx - 1]).substring(0, 10)) {
                return res;
            } else {
                return res.substring(11);
            }
        });
    }

    function formatValue(v) {
        const a = Math.abs(v);
        if (a >= 1000000000000000) { return (v / 1000000000000000) + 'P'; }
        if (a >= 1000000000000) { return (v / 1000000000000) + 'T'; }
        if (a >= 1000000000) { return (v / 1000000000) + 'G'; }
        if (a >= 1000000) { return (v / 1000000) + 'M'; }
        if (a >= 1000) { return (v / 1000) + 'K'; }
        if (a > 0 && a < 0.001) { return (v * 1000000) + "μ"; }
        return v;
    }

    let axis_x = {
        stroke: axes_color,
        grid: { width: 1 / devicePixelRatio, stroke: grid_color },
        ticks: { width: 1 / devicePixelRatio, stroke: grid_color }
        ticks: { width: 1 / devicePixelRatio, stroke: grid_color },
        values: formatDateTimes,
        space: 80,
        incrs: [1, 5, 10, 15, 30,
                60, 60 * 5, 60 * 10, 60 * 15, 60 * 30,
                3600, 3600 * 2, 3600 * 3, 3600 * 4, 3600 * 6, 3600 * 12,
                3600 * 24],
    };

    let axes = [axis, axis];
    let series = [{ label: "x" }];
    let axis_y = {
        stroke: axes_color,
        grid: { width: 1 / devicePixelRatio, stroke: grid_color },
        ticks: { width: 1 / devicePixelRatio, stroke: grid_color },
        values: (self, ticks) => ticks.map(formatValue)
    };

    let axes = [axis_x, axis_y];
    let series = [{ label: "time", value: (self, t) => formatDateTime(t) }];
    let data = [reply.data[reply.meta[0].name]];

    // Treat every column as series
@ -1254,9 +1302,10 @@ async function draw(idx, chart, url_params, query) {
    const opts = {
        width: chart.clientWidth,
        height: chart.clientHeight,
        scales: { x: { time: false } }, /// Because we want to split and format time on our own.
        axes,
        series,
        padding: [ null, null, null, (Math.round(max_value * 100) / 100).toString().length * 6 - 10 ],
        padding: [ null, null, null, 3 ],
        plugins: [ legendAsTooltipPlugin() ],
        cursor: {
            sync: {
@ -522,6 +522,9 @@
    const current_url = new URL(window.location);
    const opened_locally = location.protocol == 'file:';

    /// Run query instantly after page is loaded if the run parameter is present.
    const run_immediately = current_url.searchParams.has("run");

    const server_address = current_url.searchParams.get('url');
    if (server_address) {
        document.getElementById('url').value = server_address;
@ -599,6 +602,9 @@
    const title = "ClickHouse Query: " + query;

    let history_url = window.location.pathname + '?user=' + encodeURIComponent(user);
    if (run_immediately) {
        history_url += "&run=1";
    }
    if (server_address != location.origin) {
        /// Save server's address in URL if it's not identical to the address of the play UI.
        history_url += '&url=' + encodeURIComponent(server_address);
@ -1160,6 +1166,10 @@
        });
    }

    if (run_immediately) {
        post();
    }

    document.getElementById('toggle-light').onclick = function() {
        setColorTheme('light', true);
    }
@ -39,6 +39,8 @@ disable = '''
  no-else-return,
  global-statement,
  f-string-without-interpolation,
  consider-using-with,
  use-maxsplit-arg,
'''

[tool.pylint.SIMILARITIES]
@ -67,6 +67,9 @@ struct UniqVariadicHash<false, true>
{
    static UInt64 apply(size_t num_args, const IColumn ** columns, size_t row_num)
    {
        if (!num_args)
            return 0;

        UInt64 hash;

        const auto & tuple_columns = assert_cast<const ColumnTuple *>(columns[0])->getColumns();
@ -24,7 +24,7 @@ void InterpolateNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_st
{
    buffer << std::string(indent, ' ') << "INTERPOLATE id: " << format_state.getNodeId(this);

    buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION\n";
    buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION " << expression_name << " \n";
    getExpression()->dumpTreeImpl(buffer, format_state, indent + 4);

    buffer << '\n' << std::string(indent + 2, ' ') << "INTERPOLATE_EXPRESSION\n";
@ -50,6 +50,8 @@ public:
        return QueryTreeNodeType::INTERPOLATE;
    }

    const std::string & getExpressionName() const { return expression_name; }

    void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override;

protected:
@ -11,6 +11,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/JoinNode.h>
#include <Analyzer/Utils.h>

namespace DB
@ -25,8 +26,15 @@ public:
    using Base = InDepthQueryTreeVisitorWithContext<ComparisonTupleEliminationPassVisitor>;
    using Base::Base;

    static bool needChildVisit(QueryTreeNodePtr &, QueryTreeNodePtr & child)
    static bool needChildVisit(QueryTreeNodePtr & parent, QueryTreeNodePtr & child)
    {
        if (parent->getNodeType() == QueryTreeNodeType::JOIN)
        {
            /// In the JOIN ON section, comparison of tuples works a bit differently.
            /// For example, we can join on tuple(NULL) = tuple(NULL); join algorithms consider only NULLs on the top level.
            if (parent->as<const JoinNode &>().getJoinExpression().get() == child.get())
                return false;
        }
        return child->getNodeType() != QueryTreeNodeType::TABLE_FUNCTION;
    }
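Review note: the guard above follows the usual needChildVisit pattern — the pass keeps traversing the tree but refuses to rewrite the JOIN ON expression itself, since join algorithms compare tuples there by looking only at top-level NULLs. A toy, self-contained C++ sketch of that traversal-guard pattern (all names are illustrative; this is not ClickHouse's query-tree API):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node
{
    std::string kind;                          // e.g. "JOIN", "TABLE", "FUNCTION"
    std::vector<std::shared_ptr<Node>> children;
    std::shared_ptr<Node> join_expression;     // only meaningful when kind == "JOIN"
};

// Decline to visit exactly the JOIN ON expression; visit everything else.
static bool needChildVisit(const Node & parent, const Node & child)
{
    if (parent.kind == "JOIN" && parent.join_expression.get() == &child)
        return false;
    return true;
}

static void visit(const std::shared_ptr<Node> & node)
{
    std::cout << "visit " << node->kind << '\n';
    for (const auto & child : node->children)
        if (needChildVisit(*node, *child))
            visit(child);
}

int main()
{
    auto on_expr = std::make_shared<Node>(Node{"FUNCTION", {}, {}});
    auto table = std::make_shared<Node>(Node{"TABLE", {}, {}});
    auto join = std::make_shared<Node>(Node{"JOIN", {table, on_expr}, on_expr});
    visit(join); // prints "visit JOIN" and "visit TABLE"; the ON expression is skipped
}

Putting the exclusion in needChildVisit, rather than inside the rewrite itself, keeps it in one place while still letting the visitor descend into the rest of the JOIN subtree.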
@ -64,6 +64,8 @@
#include <Analyzer/Resolve/TableExpressionsAliasVisitor.h>
#include <Analyzer/Resolve/ReplaceColumnsVisitor.h>

#include <Planner/PlannerActionsVisitor.h>

#include <Core/Settings.h>

namespace ProfileEvents
@ -4122,11 +4124,7 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo
{
    auto & interpolate_node_typed = interpolate_node->as<InterpolateNode &>();

    auto * column_to_interpolate = interpolate_node_typed.getExpression()->as<IdentifierNode>();
    if (!column_to_interpolate)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for identifiers, but {} is found",
            interpolate_node_typed.getExpression()->formatASTForErrorMessage());
    auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName();
    auto column_to_interpolate_name = interpolate_node_typed.getExpressionName();

    resolveExpressionNode(interpolate_node_typed.getExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);

@ -4135,14 +4133,11 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo
    auto & interpolation_to_resolve = interpolate_node_typed.getInterpolateExpression();
    IdentifierResolveScope interpolate_scope(interpolation_to_resolve, &scope /*parent_scope*/);

    auto fake_column_node = std::make_shared<ColumnNode>(NameAndTypePair(column_to_interpolate_name, interpolate_node_typed.getExpression()->getResultType()), interpolate_node_typed.getExpression());
    auto fake_column_node = std::make_shared<ColumnNode>(NameAndTypePair(column_to_interpolate_name, interpolate_node_typed.getExpression()->getResultType()), interpolate_node);
    if (is_column_constant)
        interpolate_scope.expression_argument_name_to_node.emplace(column_to_interpolate_name, fake_column_node);

    resolveExpressionNode(interpolation_to_resolve, interpolate_scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);

    if (is_column_constant)
        interpolation_to_resolve = interpolation_to_resolve->cloneAndReplace(fake_column_node, interpolate_node_typed.getExpression());
}
}
@ -4546,7 +4541,15 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
    resolveExpressionNode(nodes[1], scope, /* allow_lambda_expression */false, /* allow_table_function */false);
    if (auto * constant = nodes[1]->as<ConstantNode>())
    {
        view_params[identifier_node->getIdentifier().getFullName()] = convertFieldToString(constant->getValue());
        /// Serialize the constant value using datatype-specific
        /// interfaces to match the deserialization in ReplaceQueryParametersVisitor.
        WriteBufferFromOwnString buf;
        const auto & value = constant->getValue();
        auto real_type = constant->getResultType();
        auto temporary_column = real_type->createColumn();
        temporary_column->insert(value);
        real_type->getDefaultSerialization()->serializeTextEscaped(*temporary_column, 0, buf, {});
        view_params[identifier_node->getIdentifier().getFullName()] = buf.str();
    }
}
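Review note: the point of the replacement above is symmetry — view parameters are later unescaped by the query-parameter substitution code, so they must be written in the same escaped text form the reader expects. A minimal standalone sketch of the idea, with a deliberately simplified escaping function (hypothetical, far cruder than ClickHouse's per-type serializations):

#include <iostream>
#include <string>

// Hypothetical, simplified escaping: the real code delegates to the value's
// data type (serializeTextEscaped), which handles every type correctly.
static std::string escapeText(const std::string & s)
{
    std::string out;
    for (char c : s)
    {
        switch (c)
        {
            case '\t': out += "\\t"; break;
            case '\n': out += "\\n"; break;
            case '\\': out += "\\\\"; break;
            default: out += c;
        }
    }
    return out;
}

int main()
{
    // A raw tab or newline would desynchronize the writer and the unescaping
    // reader; escaping keeps the round trip lossless.
    std::cout << escapeText("a\tb\nc\\d") << '\n'; // prints: a\tb\nc\\d
}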
@ -43,6 +43,12 @@ size_t getCompoundTypeDepth(const IDataType & type)
    const auto & tuple_elements = assert_cast<const DataTypeTuple &>(*current_type).getElements();
    if (!tuple_elements.empty())
        current_type = tuple_elements.at(0).get();
    else
    {
        /// Special case: a tuple with no elements - tuple(). What is the compound type depth in this case?
        /// I'm not certain about the theoretical answer, but from experiment, 1 is the most reasonable choice.
        return 1;
    }

    ++result;
}
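Review note: a toy model of the rule being patched, for intuition only (the Type struct below is invented for the example and unrelated to IDataType): nesting adds one level per compound type, and the degenerate tuple() bottoms out at depth 1, mirroring the special case in the diff.

#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

struct Type
{
    enum Kind { Scalar, Array, Tuple } kind = Scalar;
    std::vector<std::shared_ptr<Type>> elements; // Array: exactly 1; Tuple: 0..n
};

static std::size_t compoundTypeDepth(const Type & type)
{
    std::size_t result = 0;
    const Type * current = &type;
    while (current->kind != Type::Scalar)
    {
        if (current->kind == Type::Tuple && current->elements.empty())
            return 1; // the special case from the diff: tuple() counts as depth 1
        current = current->elements.front().get();
        ++result;
    }
    return result;
}

int main()
{
    Type scalar;
    Type empty_tuple;
    empty_tuple.kind = Type::Tuple;
    std::cout << compoundTypeDepth(scalar) << ' ' << compoundTypeDepth(empty_tuple) << '\n'; // 0 1
}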
@ -41,6 +41,7 @@ public:
    bool allow_s3_native_copy = true;
    bool allow_azure_native_copy = true;
    bool use_same_s3_credentials_for_base_backup = false;
    bool use_same_password_for_base_backup = false;
    bool azure_attempt_to_create_container = true;
    ReadSettings read_settings;
    WriteSettings write_settings;
@ -92,7 +92,8 @@ BackupImpl::BackupImpl(
    std::shared_ptr<IBackupReader> reader_,
    const ContextPtr & context_,
    bool is_internal_backup_,
    bool use_same_s3_credentials_for_base_backup_)
    bool use_same_s3_credentials_for_base_backup_,
    bool use_same_password_for_base_backup_)
    : backup_info(backup_info_)
    , backup_name_for_logging(backup_info.toStringForLogging())
    , use_archive(!archive_params_.archive_name.empty())
@ -104,6 +105,7 @@ BackupImpl::BackupImpl(
    , version(INITIAL_BACKUP_VERSION)
    , base_backup_info(base_backup_info_)
    , use_same_s3_credentials_for_base_backup(use_same_s3_credentials_for_base_backup_)
    , use_same_password_for_base_backup(use_same_password_for_base_backup_)
    , log(getLogger("BackupImpl"))
{
    open();
@ -120,7 +122,8 @@ BackupImpl::BackupImpl(
    const std::shared_ptr<IBackupCoordination> & coordination_,
    const std::optional<UUID> & backup_uuid_,
    bool deduplicate_files_,
    bool use_same_s3_credentials_for_base_backup_)
    bool use_same_s3_credentials_for_base_backup_,
    bool use_same_password_for_base_backup_)
    : backup_info(backup_info_)
    , backup_name_for_logging(backup_info.toStringForLogging())
    , use_archive(!archive_params_.archive_name.empty())
@ -135,6 +138,7 @@ BackupImpl::BackupImpl(
    , base_backup_info(base_backup_info_)
    , deduplicate_files(deduplicate_files_)
    , use_same_s3_credentials_for_base_backup(use_same_s3_credentials_for_base_backup_)
    , use_same_password_for_base_backup(use_same_password_for_base_backup_)
    , log(getLogger("BackupImpl"))
{
    open();
@ -258,6 +262,11 @@ std::shared_ptr<const IBackup> BackupImpl::getBaseBackupUnlocked() const
    params.is_internal_backup = is_internal_backup;
    /// use_same_s3_credentials_for_base_backup should be inherited for base backups
    params.use_same_s3_credentials_for_base_backup = use_same_s3_credentials_for_base_backup;
    /// use_same_password_for_base_backup should be inherited for base backups
    params.use_same_password_for_base_backup = use_same_password_for_base_backup;

    if (params.use_same_password_for_base_backup)
        params.password = archive_params.password;

    base_backup = BackupFactory::instance().createBackup(params);
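Review note: the inheritance logic above can be summarized in a few lines. A hedged sketch with invented names (not the real BackupFactory::CreateParams) showing how the flag propagates and, when set, carries the child archive's password down to the base backup:

#include <string>

struct BaseBackupParams // invented stand-in, not BackupFactory::CreateParams
{
    std::string password;
    bool use_same_password_for_base_backup = false;
};

static BaseBackupParams makeBaseBackupParams(bool use_same_password, const std::string & archive_password)
{
    BaseBackupParams params;
    params.use_same_password_for_base_backup = use_same_password; // inherited by the base backup
    if (params.use_same_password_for_base_backup)
        params.password = archive_password; // unlock the base archive with the same password
    return params;
}

int main()
{
    BaseBackupParams base = makeBaseBackupParams(true, "secret");
    return base.password == "secret" ? 0 : 1;
}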
@ -41,7 +41,8 @@ public:
        std::shared_ptr<IBackupReader> reader_,
        const ContextPtr & context_,
        bool is_internal_backup_,
        bool use_same_s3_credentials_for_base_backup_);
        bool use_same_s3_credentials_for_base_backup_,
        bool use_same_password_for_base_backup_);

    BackupImpl(
        const BackupInfo & backup_info_,
@ -53,7 +54,8 @@ public:
        const std::shared_ptr<IBackupCoordination> & coordination_,
        const std::optional<UUID> & backup_uuid_,
        bool deduplicate_files_,
        bool use_same_s3_credentials_for_base_backup_);
        bool use_same_s3_credentials_for_base_backup_,
        bool use_same_password_for_base_backup_);

    ~BackupImpl() override;

@ -153,6 +155,7 @@ private:
    bool writing_finalized = false;
    bool deduplicate_files = true;
    bool use_same_s3_credentials_for_base_backup = false;
    bool use_same_password_for_base_backup = false;
    const LoggerPtr log;
};
@ -29,6 +29,7 @@ namespace ErrorCodes
    M(Bool, allow_s3_native_copy) \
    M(Bool, allow_azure_native_copy) \
    M(Bool, use_same_s3_credentials_for_base_backup) \
    M(Bool, use_same_password_for_base_backup) \
    M(Bool, azure_attempt_to_create_container) \
    M(Bool, read_from_filesystem_cache) \
    M(UInt64, shard_num) \
@ -50,6 +50,9 @@ struct BackupSettings
    /// Whether base backup to S3 should inherit credentials from the BACKUP query.
    bool use_same_s3_credentials_for_base_backup = false;

    /// Whether the base backup archive should be unlocked using the same password as the incremental archive.
    bool use_same_password_for_base_backup = false;

    /// Whether a new Azure container should be created if it does not exist (requires permissions at storage account level).
    bool azure_attempt_to_create_container = true;
@ -602,6 +602,7 @@ void BackupsWorker::doBackup(
    backup_create_params.allow_s3_native_copy = backup_settings.allow_s3_native_copy;
    backup_create_params.allow_azure_native_copy = backup_settings.allow_azure_native_copy;
    backup_create_params.use_same_s3_credentials_for_base_backup = backup_settings.use_same_s3_credentials_for_base_backup;
    backup_create_params.use_same_password_for_base_backup = backup_settings.use_same_password_for_base_backup;
    backup_create_params.azure_attempt_to_create_container = backup_settings.azure_attempt_to_create_container;
    backup_create_params.read_settings = getReadSettingsForBackup(context, backup_settings);
    backup_create_params.write_settings = getWriteSettingsForBackup(context);
@ -924,6 +925,7 @@ void BackupsWorker::doRestore(
    backup_open_params.password = restore_settings.password;
    backup_open_params.allow_s3_native_copy = restore_settings.allow_s3_native_copy;
    backup_open_params.use_same_s3_credentials_for_base_backup = restore_settings.use_same_s3_credentials_for_base_backup;
    backup_open_params.use_same_password_for_base_backup = restore_settings.use_same_password_for_base_backup;
    backup_open_params.read_settings = getReadSettingsForRestore(context);
    backup_open_params.write_settings = getWriteSettingsForRestore(context);
    backup_open_params.is_internal_backup = restore_settings.internal;
@ -323,7 +323,7 @@ bool RestoreCoordinationRemote::hasConcurrentRestores(const std::atomic<size_t>
        return false;

    bool result = false;
    std::string path = zookeeper_path +"/stage";
    std::string path = zookeeper_path + "/stage";

    auto holder = with_retries.createRetriesControlHolder("createRootNodes");
    holder.retries_ctl.retryLoop(
@ -61,8 +61,6 @@ private:
    void createRootNodes();
    void removeAllNodes();

    class ReplicatedDatabasesMetadataSync;

    /// get_zookeeper will provide a zookeeper client without any fault injection
    const zkutil::GetZooKeeper get_zookeeper;
    const String root_zookeeper_path;
@ -164,6 +164,7 @@ namespace
    M(RestoreUDFCreationMode, create_function) \
    M(Bool, allow_s3_native_copy) \
    M(Bool, use_same_s3_credentials_for_base_backup) \
    M(Bool, use_same_password_for_base_backup) \
    M(Bool, restore_broken_parts_as_detached) \
    M(Bool, internal) \
    M(String, host_id) \
@ -113,6 +113,9 @@ struct RestoreSettings
    /// Whether base backup from S3 should inherit credentials from the RESTORE query.
    bool use_same_s3_credentials_for_base_backup = false;

    /// Whether the base backup archive should be unlocked using the same password as the incremental archive.
    bool use_same_password_for_base_backup = false;

    /// If it's true, RESTORE won't stop on broken parts while restoring; instead they will be restored as detached parts
    /// to the `detached` folder with names starting with `broken-from-backup`.
    bool restore_broken_parts_as_detached = false;
@ -222,10 +222,19 @@ void RestorerFromBackup::setStage(const String & new_stage, const String & messa
    if (restore_coordination)
    {
        restore_coordination->setStage(new_stage, message);
        if (new_stage == Stage::FINDING_TABLES_IN_BACKUP)
            restore_coordination->waitForStage(new_stage, on_cluster_first_sync_timeout);
        else
            restore_coordination->waitForStage(new_stage);

        /// The initiator of a RESTORE ON CLUSTER query waits for other hosts to complete their work (see waitForStage(Stage::COMPLETED) in BackupsWorker::doRestore),
        /// but other hosts shouldn't wait for each other's completion. (That's simply unnecessary, and also
        /// the initiator may start cleaning up (e.g. removing restore-coordination ZooKeeper nodes) once all other hosts are in Stage::COMPLETED.)
        bool need_wait = (new_stage != Stage::COMPLETED);

        if (need_wait)
        {
            if (new_stage == Stage::FINDING_TABLES_IN_BACKUP)
                restore_coordination->waitForStage(new_stage, on_cluster_first_sync_timeout);
            else
                restore_coordination->waitForStage(new_stage);
        }
    }
}
@ -141,7 +141,8 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
            reader,
            params.context,
            params.is_internal_backup,
            /* use_same_s3_credentials_for_base_backup */ false);
            /* use_same_s3_credentials_for_base_backup */ false,
            params.use_same_password_for_base_backup);
    }
    else
    {
@ -164,7 +165,8 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
            params.backup_coordination,
            params.backup_uuid,
            params.deduplicate_files,
            /* use_same_s3_credentials_for_base_backup */ false);
            /* use_same_s3_credentials_for_base_backup */ false,
            params.use_same_password_for_base_backup);
    }
#else
    throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "AzureBlobStorage support is disabled");
@ -120,7 +120,8 @@ void registerBackupEngineS3(BackupFactory & factory)
            reader,
            params.context,
            params.is_internal_backup,
            params.use_same_s3_credentials_for_base_backup);
            params.use_same_s3_credentials_for_base_backup,
            params.use_same_password_for_base_backup);
    }
    else
    {
@ -144,7 +145,8 @@ void registerBackupEngineS3(BackupFactory & factory)
            params.backup_coordination,
            params.backup_uuid,
            params.deduplicate_files,
            params.use_same_s3_credentials_for_base_backup);
            params.use_same_s3_credentials_for_base_backup,
            params.use_same_password_for_base_backup);
    }
#else
    throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "S3 support is disabled");
@ -178,7 +178,8 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
            reader,
            params.context,
            params.is_internal_backup,
            params.use_same_s3_credentials_for_base_backup);
            params.use_same_s3_credentials_for_base_backup,
            params.use_same_password_for_base_backup);
    }
    else
    {
@ -197,7 +198,8 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
            params.backup_coordination,
            params.backup_uuid,
            params.deduplicate_files,
            params.use_same_s3_credentials_for_base_backup);
            params.use_same_s3_credentials_for_base_backup,
            params.use_same_password_for_base_backup);
    }
};
@ -33,12 +33,12 @@ ConnectionEstablisher::ConnectionEstablisher(
{
}

void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::string & fail_message)
void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::string & fail_message, bool force_connected)
{
    try
    {
        ProfileEvents::increment(ProfileEvents::DistributedConnectionTries);
        result.entry = pool->get(*timeouts, settings, /* force_connected = */ false);
        result.entry = pool->get(*timeouts, settings, force_connected);
        AsyncCallbackSetter async_setter(&*result.entry, std::move(async_callback));

        UInt64 server_revision = 0;
@ -24,7 +24,13 @@ public:
        const QualifiedTableName * table_to_check = nullptr);

    /// Establish a connection and save it in result; write a possible exception message in fail_message.
    void run(TryResult & result, std::string & fail_message);
    /// The connection is returned from the connection pool and can be stale. Use the force_connected flag to ensure the connection is a working one.
    /// NOTE: force_connected is false by default for the following reason:
    /// when true, it implies sending a Ping packet to the peer and, if that fails, re-establishing the connection.
    /// The ping-pong round trip can be unnecessary when the connection is still alive,
    /// so the optimistic approach is used by default; in that case, stale connections are handled by retrying
    /// - see ConnectionPoolWithFailover, for example.
    void run(TryResult & result, std::string & fail_message, bool force_connected = false);

    /// Set an async callback that will be called when reading from the socket blocks.
    void setAsyncCallback(AsyncCallback async_callback_) { async_callback = std::move(async_callback_); }
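Review note: the comment block documents an optimistic policy. A small hypothetical sketch of the same trade-off (names invented; the real pool returns pool entries, not references): by default the pooled connection is handed back unverified, and only force_connected pays for the ping round trip.

#include <iostream>

struct Connection // invented; stands in for a pooled connection entry
{
    bool alive = true;
    bool ping() const { return alive; } // a real ping is a network round trip
    void reconnect() { alive = true; }
};

// Optimistic by default: skip the ping unless the caller insists.
static Connection & getConnection(Connection & pooled, bool force_connected = false)
{
    if (force_connected && !pooled.ping())
        pooled.reconnect(); // verified-or-reestablished, at the cost of a round trip
    return pooled;
}

int main()
{
    Connection c;
    c.alive = false;
    getConnection(c);             // optimistic: a stale entry may be handed back
    std::cout << c.alive << '\n'; // 0 -- retries one level up would catch this
    getConnection(c, /* force_connected = */ true);
    std::cout << c.alive << '\n'; // 1
}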
@ -330,7 +330,38 @@ ColumnPtr ColumnAggregateFunction::filter(const Filter & filter, ssize_t result_

void ColumnAggregateFunction::expand(const Filter & mask, bool inverted)
{
    expandDataByMask<char *>(data, mask, inverted);
    ensureOwnership();
    Arena & arena = createOrGetArena();

    if (mask.size() < data.size())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Mask size should be no less than data size.");

    ssize_t from = data.size() - 1;
    ssize_t index = mask.size() - 1;
    data.resize(mask.size());
    while (index >= 0)
    {
        if (!!mask[index] ^ inverted)
        {
            if (from < 0)
                throw Exception(ErrorCodes::LOGICAL_ERROR, "Too many bytes in mask");

            /// Copy only if it makes sense.
            if (index != from)
                data[index] = data[from];
            --from;
        }
        else
        {
            data[index] = arena.alignedAlloc(func->sizeOfData(), func->alignOfData());
            func->create(data[index]);
        }

        --index;
    }

    if (from != -1)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Not enough bytes in mask");
}

ColumnPtr ColumnAggregateFunction::permute(const Permutation & perm, size_t limit) const
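Review note: the new expand() walks the mask backwards so that moves never clobber values that still need to be copied; positions where the mask is unset receive a freshly created aggregate state instead. A standalone analogue on plain integers (filler stands in for the newly created state; everything else mirrors the control flow above):

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

static void expandByMask(std::vector<int> & data, const std::vector<bool> & mask, int filler)
{
    if (mask.size() < data.size())
        throw std::logic_error("Mask size should be no less than data size.");

    std::ptrdiff_t from = static_cast<std::ptrdiff_t>(data.size()) - 1;
    data.resize(mask.size());

    // Walk backwards so a move never overwrites a value that is still pending.
    for (std::ptrdiff_t index = static_cast<std::ptrdiff_t>(mask.size()) - 1; index >= 0; --index)
    {
        if (mask[index])
        {
            if (from < 0)
                throw std::logic_error("Too many set bits in mask");
            if (index != from)
                data[index] = data[from];
            --from;
        }
        else
            data[index] = filler; // the diff allocates and creates a fresh aggregate state here
    }

    if (from != -1)
        throw std::logic_error("Not enough set bits in mask");
}

int main()
{
    std::vector<int> data{10, 20};
    expandByMask(data, {true, false, true, false}, 0);
    for (int v : data)
        std::cout << v << ' '; // prints: 10 0 20 0
}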
@ -218,20 +218,27 @@ AsyncLoader::~AsyncLoader()
{
    // All `LoadTask` objects should be destructed before AsyncLoader destruction because they hold a reference.
    // To make sure, we check that all pending jobs are finished.
    std::unique_lock lock{mutex};
    if (scheduled_jobs.empty() && finished_jobs.empty())
        return;
    {
        std::unique_lock lock{mutex};
        if (!scheduled_jobs.empty() || !finished_jobs.empty())
        {
            std::vector<String> scheduled;
            std::vector<String> finished;
            scheduled.reserve(scheduled_jobs.size());
            finished.reserve(finished_jobs.size());
            for (const auto & [job, _] : scheduled_jobs)
                scheduled.push_back(job->name);
            for (const auto & job : finished_jobs)
                finished.push_back(job->name);
            LOG_ERROR(log, "Bug. Destruction with pending ({}) and finished ({}) load jobs.", fmt::join(scheduled, ", "), fmt::join(finished, ", "));
            abort();
        }
    }

    std::vector<String> scheduled;
    std::vector<String> finished;
    scheduled.reserve(scheduled_jobs.size());
    finished.reserve(finished_jobs.size());
    for (const auto & [job, _] : scheduled_jobs)
        scheduled.push_back(job->name);
    for (const auto & job : finished_jobs)
        finished.push_back(job->name);
    LOG_ERROR(log, "Bug. Destruction with pending ({}) and finished ({}) load jobs.", fmt::join(scheduled, ", "), fmt::join(finished, ", "));
    abort();
    // When all jobs are done we could still have finalizing workers.
    // These workers could call updateCurrentPriorityAndSpawn() that scans all pools.
    // We need to stop all of them before destructing any of them.
    stop();
}

void AsyncLoader::start()
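Review note: the fix wraps the consistency check in its own scope so the mutex is released before stop() joins the finalizing workers. A hedged minimal sketch of that locking pattern (invented Loader type, not the real AsyncLoader):

#include <cstdlib>
#include <mutex>
#include <thread>
#include <vector>

struct Loader // invented stand-in, not the real AsyncLoader
{
    std::mutex mutex;
    std::vector<int> scheduled_jobs;
    std::vector<std::thread> workers;

    ~Loader()
    {
        {
            std::unique_lock lock{mutex}; // released at the end of this scope
            if (!scheduled_jobs.empty())
                std::abort(); // destruction with pending jobs is a bug
        }
        stop(); // joins workers; must not run while holding the mutex
    }

    void stop()
    {
        for (auto & worker : workers)
            if (worker.joinable())
                worker.join();
    }
};

int main()
{
    Loader loader; // destructor checks, unlocks, then stops the workers
}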
@ -2,6 +2,7 @@ set (SRCS
    AbstractConfigurationComparison.cpp
    ConfigProcessor.cpp
    getClientConfigPath.cpp
    getLocalConfigPath.cpp
    ConfigReloader.cpp
    YAMLParser.cpp
    ConfigHelper.cpp
@ -138,9 +138,14 @@ static Node * getRootNode(Document * document)
    return XMLUtils::getRootNode(document);
}

static size_t firstNonWhitespacePos(const std::string & s)
{
    return s.find_first_not_of(" \t\n\r");
}

static bool allWhitespace(const std::string & s)
{
    return s.find_first_not_of(" \t\n\r") == std::string::npos;
    return firstNonWhitespacePos(s) == std::string::npos;
}

static void deleteAttributesRecursive(Node * root)
@ -622,6 +627,49 @@ ConfigProcessor::Files ConfigProcessor::getConfigMergeFiles(const std::string &
    return files;
}

XMLDocumentPtr ConfigProcessor::parseConfig(const std::string & config_path)
{
    fs::path p(config_path);
    std::string extension = p.extension();
    boost::algorithm::to_lower(extension);

    if (extension == ".xml")
        return dom_parser.parse(config_path);
    else if (extension == ".yaml" || extension == ".yml")
        return YAMLParser::parse(config_path);
    else
    {
        /// Suppose a non-regular file is parsed as XML, such as a pipe: /dev/fd/X (regardless of whether it has an .xml extension or not).
        if (!fs::is_regular_file(config_path))
            return dom_parser.parse(config_path);

        /// If the regular file begins with < it might be XML, otherwise it might be YAML.
        bool maybe_xml = false;
        {
            std::ifstream file(config_path);
            if (!file.is_open())
                throw Exception(ErrorCodes::CANNOT_LOAD_CONFIG, "Unknown format of '{}' config", config_path);

            std::string line;
            while (std::getline(file, line))
            {
                const size_t pos = firstNonWhitespacePos(line);

                if (pos < line.size() && '<' == line[pos])
                {
                    maybe_xml = true;
                    break;
                }
                else if (pos != std::string::npos)
                    break;
            }
        }
        if (maybe_xml)
            return dom_parser.parse(config_path);
        return YAMLParser::parse(config_path);
    }
}

XMLDocumentPtr ConfigProcessor::processConfig(
    bool * has_zk_includes,
    zkutil::ZooKeeperNodeCache * zk_node_cache,
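Review note: parseConfig falls back to content sniffing when the extension is inconclusive — the first non-whitespace character decides between XML and YAML. A self-contained sketch of just that sniffing rule (simplified; the real code also special-cases non-regular files and raises CANNOT_LOAD_CONFIG on unreadable files):

#include <fstream>
#include <iostream>
#include <string>

// XML if the first non-whitespace character of the file is '<', else YAML.
static bool looksLikeXml(const std::string & path)
{
    std::ifstream file(path);
    std::string line;
    while (std::getline(file, line))
    {
        const auto pos = line.find_first_not_of(" \t\n\r");
        if (pos == std::string::npos)
            continue; // blank line: keep scanning
        return line[pos] == '<';
    }
    return false; // nothing but whitespace: fall back to YAML
}

int main()
{
    std::cout << (looksLikeXml("config") ? "XML" : "YAML") << '\n';
}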
@ -633,23 +681,7 @@ XMLDocumentPtr ConfigProcessor::processConfig(

    if (fs::exists(path))
    {
        fs::path p(path);

        std::string extension = p.extension();
        boost::algorithm::to_lower(extension);

        if (extension == ".yaml" || extension == ".yml")
        {
            config = YAMLParser::parse(path);
        }
        else if (extension == ".xml" || extension == ".conf" || extension.empty())
        {
            config = dom_parser.parse(path);
        }
        else
        {
            throw Exception(ErrorCodes::CANNOT_LOAD_CONFIG, "Unknown format of '{}' config", path);
        }
        config = parseConfig(path);
    }
    else
    {
@ -673,20 +705,7 @@ XMLDocumentPtr ConfigProcessor::processConfig(
        LOG_DEBUG(log, "Merging configuration file '{}'.", merge_file);

        XMLDocumentPtr with;

        fs::path p(merge_file);
        std::string extension = p.extension();
        boost::algorithm::to_lower(extension);

        if (extension == ".yaml" || extension == ".yml")
        {
            with = YAMLParser::parse(merge_file);
        }
        else
        {
            with = dom_parser.parse(merge_file);
        }

        with = parseConfig(merge_file);
        if (!merge(config, with))
        {
            LOG_DEBUG(log, "Merging bypassed - configuration file '{}' doesn't belong to configuration '{}' - merging root node name '{}' doesn't match '{}'",
@ -730,19 +749,7 @@ XMLDocumentPtr ConfigProcessor::processConfig(
    {
        LOG_DEBUG(log, "Including configuration file '{}'.", include_from_path);

        fs::path p(include_from_path);
        std::string extension = p.extension();
        boost::algorithm::to_lower(extension);

        if (extension == ".yaml" || extension == ".yml")
        {
            include_from = YAMLParser::parse(include_from_path);
        }
        else
        {
            include_from = dom_parser.parse(include_from_path);
        }

        include_from = parseConfig(include_from_path);
        contributing_files.push_back(include_from_path);
    }
@ -65,6 +65,8 @@ public:
        zkutil::ZooKeeperNodeCache * zk_node_cache = nullptr,
        const zkutil::EventPtr & zk_changed_event = nullptr);

    XMLDocumentPtr parseConfig(const std::string & config_path);

    /// These configurations will be used if there is no configuration file.
    static void registerEmbeddedConfig(std::string name, std::string_view content);
@ -12,7 +12,6 @@ namespace DB
std::optional<std::string> getClientConfigPath(const std::string & home_path)
{
    std::string config_path;
    bool found = false;

    std::vector<std::string> names;
    names.emplace_back("./clickhouse-client");
@ -28,18 +27,10 @@ std::optional<std::string> getClientConfigPath(const std::string & home_path)

            std::error_code ec;
            if (fs::exists(config_path, ec))
            {
                found = true;
                break;
            }
                return config_path;
        }
        if (found)
            break;
    }

    if (found)
        return config_path;

    return std::nullopt;
}
37
src/Common/Config/getLocalConfigPath.cpp
Normal file
@ -0,0 +1,37 @@
#include <Common/Config/getLocalConfigPath.h>

#include <filesystem>
#include <vector>


namespace fs = std::filesystem;

namespace DB
{

std::optional<std::string> getLocalConfigPath(const std::string & home_path)
{
    std::string config_path;

    std::vector<std::string> names;
    names.emplace_back("./clickhouse-local");
    if (!home_path.empty())
        names.emplace_back(home_path + "/.clickhouse-local/config");
    names.emplace_back("/etc/clickhouse-local/config");

    for (const auto & name : names)
    {
        for (const auto & extension : {".xml", ".yaml", ".yml"})
        {
            config_path = name + extension;

            std::error_code ec;
            if (fs::exists(config_path, ec))
                return config_path;
        }
    }

    return std::nullopt;
}

}
12
src/Common/Config/getLocalConfigPath.h
Normal file
@ -0,0 +1,12 @@
#pragma once

#include <string>
#include <optional>

namespace DB
{

/// Return path to existing configuration file.
std::optional<std::string> getLocalConfigPath(const std::string & home_path);

}