Merge branch 'master' into add-assert-into-timer-descriptor

Alexey Milovidov 2024-08-04 00:29:22 +02:00
commit 3bea8b4e25
127 changed files with 1497 additions and 1183 deletions

View File

@ -0,0 +1,21 @@
name: CheckWorkflowResults
description: Check overall workflow status and post error to slack if any
inputs:
needs:
description: github needs context as a json string
required: true
type: string
runs:
using: "composite"
steps:
- name: Check Workflow
shell: bash
run: |
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ inputs.needs }}
EOF
python3 ./tests/ci/ci_buddy.py --check-wf-status

View File

@ -1,168 +0,0 @@
name: Release
description: Makes patch releases and creates new release branch
inputs:
ref:
description: 'Git reference (branch or commit sha) from which to create the release'
required: true
type: string
type:
description: 'The type of release: "new" for a new release or "patch" for a patch release'
required: true
type: choice
options:
- patch
- new
dry-run:
description: 'Dry run'
required: false
default: true
type: boolean
token:
required: true
type: string
runs:
using: "composite"
steps:
- name: Prepare Release Info
shell: bash
run: |
python3 ./tests/ci/create_release.py --prepare-release-info \
--ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
${{ inputs.dry-run && '--dry-run' || '' }}
echo "::group::Release Info"
python3 -m json.tool /tmp/release_info.json
echo "::endgroup::"
release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
echo "Release Tag: $release_tag"
echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
- name: Download All Release Artifacts
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Push Git Tag for the Release
shell: bash
run: |
python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Push New Release Branch
if: ${{ inputs.type == 'new' }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Bump CH Version and Update Contributors' List
shell: bash
run: |
python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Bump Docker versions, Changelog, Security
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
git checkout master
python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
echo "List versions"
./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
echo "Update docker version"
./utils/list-versions/update-docker-version.sh
echo "Generate ChangeLog"
export CI=1
docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
--volume=".:/ClickHouse" clickhouse/style-test \
/ClickHouse/tests/ci/changelog.py -v --debug-helpers \
--gh-user-or-token=${{ inputs.token }} --jobs=5 \
--output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
echo "Generate Security"
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create ChangeLog PR
if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
uses: peter-evans/create-pull-request@v6
with:
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
token: ${{ inputs.token }}
committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
branch: auto/${{ env.RELEASE_TAG }}
assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher
delete-branch: true
title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
labels: do not test
body: |
Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
### Changelog category (leave one):
- Not for changelog (changelog entry is not required)
- name: Complete previous steps and Restore git state
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-completed
git reset --hard HEAD
git checkout "$GITHUB_REF_NAME"
- name: Create GH Release
shell: bash
if: ${{ inputs.type == 'patch' }}
run: |
python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export TGZ Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test TGZ Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export RPM Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test RPM Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Export Debian Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Test Debian Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }}
- name: Docker clickhouse/clickhouse-server building
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
cd "./tests/ci"
python3 ./create_release.py --set-progress-started --progress "docker server release"
export CHECK_NAME="Docker server image"
python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
python3 ./create_release.py --set-progress-completed
- name: Docker clickhouse/clickhouse-keeper building
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
cd "./tests/ci"
python3 ./create_release.py --set-progress-started --progress "docker keeper release"
export CHECK_NAME="Docker keeper image"
python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
python3 ./create_release.py --set-progress-completed
- name: Set current Release progress to Completed with OK
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
python3 ./tests/ci/create_release.py --set-progress-completed
- name: Post Slack Message
if: ${{ !cancelled() }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }}

View File

@ -1,111 +0,0 @@
name: AutoRelease
env:
PYTHONUNBUFFERED: 1
DRY_RUN: true
concurrency:
group: release
on: # yamllint disable-line rule:truthy
# Workflow uses a test bucket for packages and dry run mode (no real releases)
schedule:
- cron: '0 9 * * *'
- cron: '0 15 * * *'
workflow_dispatch:
inputs:
dry-run:
description: 'Dry run'
required: false
default: true
type: boolean
jobs:
AutoRelease:
runs-on: [self-hosted, release-maker]
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
EOF
- name: Set DRY_RUN for schedule
if: ${{ github.event_name == 'schedule' }}
run: echo "DRY_RUN=true" >> "$GITHUB_ENV"
- name: Set DRY_RUN for dispatch
if: ${{ github.event_name == 'workflow_dispatch' }}
run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV"
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
- name: Auto Release Prepare
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 auto_release.py --prepare
echo "::group::Auto Release Info"
python3 -m json.tool /tmp/autorelease_info.json
echo "::endgroup::"
{
echo 'AUTO_RELEASE_PARAMS<<EOF'
cat /tmp/autorelease_info.json
echo 'EOF'
} >> "$GITHUB_ENV"
- name: Post Release Branch statuses
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 auto_release.py --post-status
- name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }}
if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }}
uses: ./.github/actions/release
with:
ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }}
type: patch
dry-run: ${{ env.DRY_RUN }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
- name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }}
if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }}
uses: ./.github/actions/release
with:
ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }}
type: patch
dry-run: ${{ env.DRY_RUN }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
- name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }}
if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }}
uses: ./.github/actions/release
with:
ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }}
type: patch
dry-run: ${{ env.DRY_RUN }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
- name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }}
if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }}
uses: ./.github/actions/release
with:
ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }}
type: patch
dry-run: ${{ env.DRY_RUN }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
- name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }}
if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }}
uses: ./.github/actions/release
with:
ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }}
type: patch
dry-run: ${{ env.DRY_RUN }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
- name: Post Slack Message
if: ${{ !cancelled() }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
- name: Clean up
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -16,10 +16,15 @@ concurrency:
options:
- patch
- new
only-repo:
description: 'Run only repos updates including docker (repo-recovery, tests)'
required: false
default: false
type: boolean
dry-run:
description: 'Dry run'
required: false
default: true
default: false
type: boolean
jobs:
@ -35,10 +40,163 @@ jobs:
with:
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
- name: Call Release Action
uses: ./.github/actions/release
- name: Prepare Release Info
shell: bash
run: |
if [ ${{ inputs.only-repo }} == "true" ]; then
git tag -l ${{ inputs.ref }} || { echo "With only-repo option ref must be a valid release tag"; exit 1; }
fi
python3 ./tests/ci/create_release.py --prepare-release-info \
--ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
${{ inputs.dry-run == true && '--dry-run' || '' }} \
${{ inputs.only-repo == true && '--skip-tag-check' || '' }}
echo "::group::Release Info"
python3 -m json.tool /tmp/release_info.json
echo "::endgroup::"
release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
is_latest=$(jq -r '.latest' /tmp/release_info.json)
echo "Release Tag: $release_tag"
echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
if [ "$is_latest" == "true" ]; then
echo "DOCKER_TAG_TYPE=release-latest" >> "$GITHUB_ENV"
else
echo "DOCKER_TAG_TYPE=release" >> "$GITHUB_ENV"
fi
- name: Download All Release Artifacts
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Push Git Tag for the Release
if: ${{ ! inputs.only-repo }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Push New Release Branch
if: ${{ inputs.type == 'new' && ! inputs.only-repo }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Bump CH Version and Update Contributors' List
if: ${{ ! inputs.only-repo }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Bump Docker versions, Changelog, Security
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
git checkout master # in case WF started from feature branch
echo "List versions"
./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
echo "Update docker version"
./utils/list-versions/update-docker-version.sh
echo "Generate ChangeLog"
export CI=1
docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
--volume=".:/wd" --workdir="/wd" \
clickhouse/style-test \
./tests/ci/changelog.py -v --debug-helpers \
--jobs=5 \
--output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
echo "Generate Security"
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create ChangeLog PR
if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo }}
uses: peter-evans/create-pull-request@v6
with:
ref: ${{ inputs.ref }}
type: ${{ inputs.type }}
dry-run: ${{ inputs.dry-run }}
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
branch: auto/${{ env.RELEASE_TAG }}
base: master
assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher
delete-branch: true
title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
labels: do not test
body: |
Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
### Changelog category (leave one):
- Not for changelog (changelog entry is not required)
- name: Complete previous steps and Restore git state
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-completed
git reset --hard HEAD
git checkout "$GITHUB_REF_NAME"
- name: Create GH Release
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Export TGZ Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Test TGZ Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Export RPM Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Test RPM Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Export Debian Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Test Debian Packages
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Docker clickhouse/clickhouse-server building
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
cd "./tests/ci"
python3 ./create_release.py --set-progress-started --progress "docker server release"
export CHECK_NAME="Docker server image"
python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
python3 ./create_release.py --set-progress-completed
- name: Docker clickhouse/clickhouse-keeper building
if: ${{ inputs.type == 'patch' }}
shell: bash
run: |
cd "./tests/ci"
python3 ./create_release.py --set-progress-started --progress "docker keeper release"
export CHECK_NAME="Docker keeper image"
python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
python3 ./create_release.py --set-progress-completed
- name: Update release info. Merge created PRs
shell: bash
run: |
python3 ./tests/ci/create_release.py --merge-prs ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Set current Release progress to Completed with OK
shell: bash
run: |
# dummy stage to finalize release info with "progress: completed; status: OK"
python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
python3 ./tests/ci/create_release.py --set-progress-completed
- name: Post Slack Message
if: ${{ !cancelled() }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run == true && '--dry-run' || '' }}

View File

@ -142,8 +142,13 @@ jobs:
# Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3)
Builds_Report:
# run report check for failed builds to indicate the CI error
if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
needs: [RunConfig, StyleCheck, Builds_1, Builds_2]
if: ${{ !cancelled()
&& needs.RunConfig.result == 'success'
&& needs.StyleCheck.result != 'failure'
&& needs.FastTest.result != 'failure'
&& needs.BuildDockers.result != 'failure'
&& contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Builds
@ -167,12 +172,9 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
- name: Check Workflow results
run: |
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
python3 ./tests/ci/ci_buddy.py --check-wf-status
uses: ./.github/actions/check_workflow
with:
needs: ${{ toJson(needs) }}
################################# Stage Final #################################
#

View File

@ -1,69 +0,0 @@
name: PublishedReleaseCI
# - Gets artifacts from S3
# - Sends it to JFROG Artifactory
# - Adds them to the release assets
on: # yamllint disable-line rule:truthy
release:
types:
- published
workflow_dispatch:
inputs:
tag:
description: 'Release tag'
required: true
type: string
jobs:
ReleasePublish:
runs-on: [self-hosted, style-checker]
steps:
- name: Set tag from input
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
- name: Set tag from REF
if: github.event_name == 'release'
run: |
echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
- name: Deploy packages and assets
run: |
curl --silent --data '' --no-buffer \
'${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true'
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
runs-on: [self-hosted, style-checker]
steps:
- name: Set tag from input
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
- name: Set tag from REF
if: github.event_name == 'release'
run: |
echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # otherwise we will have no version info
filter: tree:0
ref: ${{ env.GITHUB_TAG }}
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
export CHECK_NAME="Docker server image"
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
- name: Check docker clickhouse/clickhouse-keeper building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
export CHECK_NAME="Docker keeper image"
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -1,74 +0,0 @@
name: TagsStableWorkflow
# - Gets artifacts from S3
# - Sends it to JFROG Artifactory
# - Adds them to the release assets
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
push:
tags:
- 'v*-prestable'
- 'v*-stable'
- 'v*-lts'
workflow_dispatch:
inputs:
tag:
description: 'Test tag'
required: true
type: string
jobs:
UpdateVersions:
runs-on: [self-hosted, style-checker]
steps:
- name: Set test tag
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
- name: Get tag name
if: github.event_name != 'workflow_dispatch'
run: |
echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
ref: master
fetch-depth: 0
filter: tree:0
- name: Update versions, docker version, changelog, security
env:
GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
run: |
./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
./utils/list-versions/update-docker-version.sh
GID=$(id -g "${UID}")
# --network=host and CI=1 are required for the S3 access from a container
docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
--volume="${GITHUB_WORKSPACE}:/ClickHouse" clickhouse/style-test \
/ClickHouse/tests/ci/changelog.py -v --debug-helpers \
--gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
--output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
git add "./docs/changelogs/${GITHUB_TAG}.md"
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create Pull Request
uses: peter-evans/create-pull-request@v6
with:
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
commit-message: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
branch: auto/${{ env.GITHUB_TAG }}
assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher
delete-branch: true
title: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
labels: do not test
body: |
Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
### Changelog category (leave one):
- Not for changelog (changelog entry is not required)

View File

@ -26,7 +26,6 @@ sed -i '/onBrokenMarkdownLinks:/ s/ignore/error/g' docusaurus.config.js
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
export CI=true
yarn install
exec yarn build "$@"
fi

View File

@ -0,0 +1,35 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v23.8.16.40-lts (e143a9039ba) FIXME as compared to v23.8.15.35-lts (060ff8e813a)
#### Improvement
* Backported in [#66962](https://github.com/ClickHouse/ClickHouse/issues/66962): Added support for parameterized view with analyzer to not analyze create parameterized view. Refactor existing parameterized view logic to not analyze create parameterized view. [#54211](https://github.com/ClickHouse/ClickHouse/pull/54211) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Backported in [#65461](https://github.com/ClickHouse/ClickHouse/issues/65461): Reload certificate chain during certificate reload. [#61671](https://github.com/ClickHouse/ClickHouse/pull/61671) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
* Backported in [#65880](https://github.com/ClickHouse/ClickHouse/issues/65880): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#65912](https://github.com/ClickHouse/ClickHouse/issues/65912): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#65281](https://github.com/ClickHouse/ClickHouse/issues/65281): Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#65368](https://github.com/ClickHouse/ClickHouse/issues/65368): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#65743](https://github.com/ClickHouse/ClickHouse/issues/65743): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65351](https://github.com/ClickHouse/ClickHouse/issues/65351): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66037](https://github.com/ClickHouse/ClickHouse/issues/66037): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
* Backported in [#65782](https://github.com/ClickHouse/ClickHouse/issues/65782): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#65926](https://github.com/ClickHouse/ClickHouse/issues/65926): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#65822](https://github.com/ClickHouse/ClickHouse/issues/65822): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#66449](https://github.com/ClickHouse/ClickHouse/issues/66449): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66717](https://github.com/ClickHouse/ClickHouse/issues/66717): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#65080](https://github.com/ClickHouse/ClickHouse/issues/65080): Follow up to [#56541](https://github.com/ClickHouse/ClickHouse/issues/56541). [#57141](https://github.com/ClickHouse/ClickHouse/pull/57141) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#65913](https://github.com/ClickHouse/ClickHouse/issues/65913): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66853](https://github.com/ClickHouse/ClickHouse/issues/66853): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).

View File

@ -119,11 +119,6 @@ Minimum size of blocks of uncompressed data required for compression when writing
You can also specify this setting in the global settings (see [min_compress_block_size](/docs/en/operations/settings/settings.md/#min-compress-block-size) setting).
The value specified when the table is created overrides the global value for this setting.
## max_partitions_to_read
Limits the maximum number of partitions that can be accessed in one query.
You can also specify the [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) setting globally.
## max_suspicious_broken_parts
If the number of broken parts in a single partition exceeds the `max_suspicious_broken_parts` value, automatic deletion is denied.
@ -691,6 +686,8 @@ Possible values:
Default value: -1 (unlimited).
You can also specify a query complexity setting [max_partitions_to_read](query-complexity#max-partitions-to-read) at a query / session / profile level.
## min_age_to_force_merge_seconds {#min_age_to_force_merge_seconds}
Merge parts if every part in the range is older than the value of `min_age_to_force_merge_seconds`.

View File

@ -188,7 +188,7 @@ If you set `timeout_before_checking_execution_speed` to 0, ClickHouse will use clock time as a base for `max_execution_time`.
What to do if the query is run longer than `max_execution_time` or the estimated running time is longer than `max_estimated_execution_time`: `throw` or `break`. By default, `throw`.
# max_execution_time_leaf
## max_execution_time_leaf
Similar semantics to `max_execution_time`, but only applied on leaf nodes for distributed or remote queries.
@ -204,7 +204,7 @@ We can use `max_execution_time_leaf` as the query settings:
SELECT count() FROM cluster(cluster, view(SELECT * FROM t)) SETTINGS max_execution_time_leaf = 10;
```
# timeout_overflow_mode_leaf
## timeout_overflow_mode_leaf
What to do when a query on a leaf node runs longer than `max_execution_time_leaf`: `throw` or `break`. By default, `throw`.
@ -426,3 +426,17 @@ Example:
```
Default value: 0 (Infinite count of simultaneous sessions).
## max_partitions_to_read {#max-partitions-to-read}
Limits the maximum number of partitions that can be accessed in one query.
The value specified when the table is created can be overridden at the query level.
Possible values:
- Any positive integer.
Default value: -1 (unlimited).
You can also specify the MergeTree setting [max_partitions_to_read](merge-tree-settings#max-partitions-to-read) in a table's settings.

View File

@ -150,15 +150,15 @@ A case-insensitive variant of [position](#position).
Query:
``` sql
SELECT position('Hello, world!', 'hello');
SELECT positionCaseInsensitive('Hello, world!', 'hello');
```
Result:
``` text
┌─position('Hello, world!', 'hello')─┐
│                                  0 │
└────────────────────────────────────┘
┌─positionCaseInsensitive('Hello, world!', 'hello')─┐
│                                                  1 │
└────────────────────────────────────────────────────┘
```
## positionUTF8

View File

@ -39,6 +39,8 @@ disable = '''
no-else-return,
global-statement,
f-string-without-interpolation,
consider-using-with,
use-maxsplit-arg,
'''
[tool.pylint.SIMILARITIES]

View File

@ -24,7 +24,7 @@ void InterpolateNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_st
{
buffer << std::string(indent, ' ') << "INTERPOLATE id: " << format_state.getNodeId(this);
buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION\n";
buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION " << expression_name << " \n";
getExpression()->dumpTreeImpl(buffer, format_state, indent + 4);
buffer << '\n' << std::string(indent + 2, ' ') << "INTERPOLATE_EXPRESSION\n";

View File

@ -50,6 +50,8 @@ public:
return QueryTreeNodeType::INTERPOLATE;
}
const std::string & getExpressionName() const { return expression_name; }
void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override;
protected:

View File

@ -64,6 +64,8 @@
#include <Analyzer/Resolve/TableExpressionsAliasVisitor.h>
#include <Analyzer/Resolve/ReplaceColumnsVisitor.h>
#include <Planner/PlannerActionsVisitor.h>
#include <Core/Settings.h>
namespace ProfileEvents
@ -4122,11 +4124,7 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo
{
auto & interpolate_node_typed = interpolate_node->as<InterpolateNode &>();
auto * column_to_interpolate = interpolate_node_typed.getExpression()->as<IdentifierNode>();
if (!column_to_interpolate)
throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for identifiers, but {} is found",
interpolate_node_typed.getExpression()->formatASTForErrorMessage());
auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName();
auto column_to_interpolate_name = interpolate_node_typed.getExpressionName();
resolveExpressionNode(interpolate_node_typed.getExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);
@ -4135,14 +4133,11 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo
auto & interpolation_to_resolve = interpolate_node_typed.getInterpolateExpression();
IdentifierResolveScope interpolate_scope(interpolation_to_resolve, &scope /*parent_scope*/);
auto fake_column_node = std::make_shared<ColumnNode>(NameAndTypePair(column_to_interpolate_name, interpolate_node_typed.getExpression()->getResultType()), interpolate_node_typed.getExpression());
auto fake_column_node = std::make_shared<ColumnNode>(NameAndTypePair(column_to_interpolate_name, interpolate_node_typed.getExpression()->getResultType()), interpolate_node);
if (is_column_constant)
interpolate_scope.expression_argument_name_to_node.emplace(column_to_interpolate_name, fake_column_node);
resolveExpressionNode(interpolation_to_resolve, interpolate_scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);
if (is_column_constant)
interpolation_to_resolve = interpolation_to_resolve->cloneAndReplace(fake_column_node, interpolate_node_typed.getExpression());
}
}

View File

@ -43,6 +43,12 @@ size_t getCompoundTypeDepth(const IDataType & type)
const auto & tuple_elements = assert_cast<const DataTypeTuple &>(*current_type).getElements();
if (!tuple_elements.empty())
current_type = tuple_elements.at(0).get();
else
{
/// Special case: a tuple with no elements - tuple(). In this case, what's the compound type depth?
/// I'm not certain about the theoretical answer, but experimentally, 1 is the most reasonable choice.
return 1;
}
++result;
}
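To make the rule above concrete, here is a hedged, standalone toy model (not ClickHouse code) of the depth computation, showing that the empty tuple contributes a depth of 1:

```cpp
#include <cstdio>
#include <vector>

/// Toy model of getCompoundTypeDepth() above: a "type" is either a scalar
/// or a tuple of element types; depth follows the first element of each
/// tuple, and the empty tuple() counts as depth 1 per the special case.
struct Type
{
    bool is_tuple = false;
    std::vector<Type> elements;
};

size_t compoundTypeDepth(const Type & type)
{
    size_t result = 0;
    const Type * current = &type;
    while (current->is_tuple)
    {
        if (current->elements.empty())
            return result + 1; /// the tuple() special case
        current = &current->elements.front();
        ++result;
    }
    return result;
}

int main()
{
    Type scalar;
    Type empty_tuple{true, {}};
    Type nested{true, {empty_tuple}};
    printf("%zu %zu %zu\n",
        compoundTypeDepth(scalar),      // 0
        compoundTypeDepth(empty_tuple), // 1
        compoundTypeDepth(nested));     // 2
}
```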

View File

@ -222,10 +222,19 @@ void RestorerFromBackup::setStage(const String & new_stage, const String & messa
if (restore_coordination)
{
restore_coordination->setStage(new_stage, message);
if (new_stage == Stage::FINDING_TABLES_IN_BACKUP)
restore_coordination->waitForStage(new_stage, on_cluster_first_sync_timeout);
else
restore_coordination->waitForStage(new_stage);
/// The initiator of a RESTORE ON CLUSTER query waits for other hosts to complete their work (see waitForStage(Stage::COMPLETED) in BackupsWorker::doRestore),
/// but other hosts shouldn't wait for each others' completion. (That's simply unnecessary and also
/// the initiator may start cleaning up (e.g. removing restore-coordination ZooKeeper nodes) once all other hosts are in Stage::COMPLETED.)
bool need_wait = (new_stage != Stage::COMPLETED);
if (need_wait)
{
if (new_stage == Stage::FINDING_TABLES_IN_BACKUP)
restore_coordination->waitForStage(new_stage, on_cluster_first_sync_timeout);
else
restore_coordination->waitForStage(new_stage);
}
}
}

View File

@ -218,20 +218,27 @@ AsyncLoader::~AsyncLoader()
{
// All `LoadTask` objects should be destructed before AsyncLoader destruction because they hold a reference.
// To make sure we check for all pending jobs to be finished.
std::unique_lock lock{mutex};
if (scheduled_jobs.empty() && finished_jobs.empty())
return;
{
std::unique_lock lock{mutex};
if (!scheduled_jobs.empty() || !finished_jobs.empty())
{
std::vector<String> scheduled;
std::vector<String> finished;
scheduled.reserve(scheduled_jobs.size());
finished.reserve(finished_jobs.size());
for (const auto & [job, _] : scheduled_jobs)
scheduled.push_back(job->name);
for (const auto & job : finished_jobs)
finished.push_back(job->name);
LOG_ERROR(log, "Bug. Destruction with pending ({}) and finished ({}) load jobs.", fmt::join(scheduled, ", "), fmt::join(finished, ", "));
abort();
}
}
std::vector<String> scheduled;
std::vector<String> finished;
scheduled.reserve(scheduled_jobs.size());
finished.reserve(finished_jobs.size());
for (const auto & [job, _] : scheduled_jobs)
scheduled.push_back(job->name);
for (const auto & job : finished_jobs)
finished.push_back(job->name);
LOG_ERROR(log, "Bug. Destruction with pending ({}) and finished ({}) load jobs.", fmt::join(scheduled, ", "), fmt::join(finished, ", "));
abort();
// When all jobs are done we could still have finalizing workers.
// These workers could call updateCurrentPriorityAndSpawn() that scans all pools.
// We need to stop all of them before destructing any of them.
stop();
}
void AsyncLoader::start()

View File

@ -306,6 +306,8 @@
\
M(FilteringMarksWithPrimaryKey, "Number of threads currently doing filtering of mark ranges by the primary key") \
M(FilteringMarksWithSecondaryKeys, "Number of threads currently doing filtering of mark ranges by secondary keys") \
\
M(S3DiskNoKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \
#ifdef APPLY_FOR_EXTERNAL_METRICS
#define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M)
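The increment site for the new metric is not part of this hunk; as a hedged sketch, the usual ClickHouse idiom for bumping such a counter looks like the following (the handler function is hypothetical):

```cpp
#include <Common/CurrentMetrics.h>

namespace CurrentMetrics
{
    extern const Metric S3DiskNoKeyErrors;
}

/// Hypothetical error path: count each NoSuchKey response from S3.
void onNoSuchKeyError()
{
    CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors);
}
```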

View File

@ -19,7 +19,7 @@ Epoll::Epoll() : events_count(0)
{
epoll_fd = epoll_create1(0);
if (epoll_fd == -1)
throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot open epoll descriptor");
throw ErrnoException(ErrorCodes::EPOLL_ERROR, "Cannot open epoll descriptor");
}
Epoll::Epoll(Epoll && other) noexcept : epoll_fd(other.epoll_fd), events_count(other.events_count.load())
@ -47,7 +47,7 @@ void Epoll::add(int fd, void * ptr, uint32_t events)
++events_count;
if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &event) == -1)
throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot add new descriptor to epoll");
throw ErrnoException(ErrorCodes::EPOLL_ERROR, "Cannot add new descriptor to epoll");
}
void Epoll::remove(int fd)
@ -55,7 +55,7 @@ void Epoll::remove(int fd)
--events_count;
if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, nullptr) == -1)
throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot remove descriptor from epoll");
throw ErrnoException(ErrorCodes::EPOLL_ERROR, "Cannot remove descriptor from epoll");
}
size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout) const
@ -82,7 +82,7 @@ size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout
continue;
}
else
throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Error in epoll_wait");
throw ErrnoException(ErrorCodes::EPOLL_ERROR, "Error in epoll_wait");
}
else
break;

View File

@ -4,8 +4,6 @@
#include <Common/ExponentiallySmoothedCounter.h>
#include <numbers>
namespace DB
{
@ -14,9 +12,10 @@ namespace DB
class EventRateMeter
{
public:
explicit EventRateMeter(double now, double period_)
explicit EventRateMeter(double now, double period_, size_t heating_ = 0)
: period(period_)
, half_decay_time(period * std::numbers::ln2) // for `ExponentiallySmoothedAverage::sumWeights()` to be equal to `1/period`
, max_interval(period * 10)
, heating(heating_)
{
reset(now);
}
@ -29,16 +28,11 @@ public:
{
// Remove data from the initial heating stage that can be present at the beginning of a query.
// Otherwise it leads to a wrong gradual increase of the average value, making the algorithm insufficiently reactive.
if (count != 0.0 && ++data_points < 5)
{
start = events.time;
events = ExponentiallySmoothedAverage();
}
if (count != 0.0 && data_points++ <= heating)
reset(events.time, data_points);
if (now - period <= start) // precise counting mode
events = ExponentiallySmoothedAverage(events.value + count, now);
else // exponential smoothing mode
events.add(count, now, half_decay_time);
duration.add(std::min(max_interval, now - duration.time), now, period);
events.add(count, now, period);
}
/// Compute average event rate throughout `[now - period, now]` period.
@ -49,24 +43,26 @@ public:
add(now, 0);
if (unlikely(now <= start))
return 0;
if (now - period <= start) // precise counting mode
return events.value / (now - start);
else // exponential smoothing mode
return events.get(half_decay_time); // equals to `events.value / period`
// We do not use .get() because sum of weights will anyway be canceled out (optimization)
return events.value / duration.value;
}
void reset(double now)
void reset(double now, size_t data_points_ = 0)
{
start = now;
events = ExponentiallySmoothedAverage();
data_points = 0;
duration = ExponentiallySmoothedAverage();
data_points = data_points_;
}
private:
const double period;
const double half_decay_time;
const double max_interval;
const size_t heating;
double start; // Instant in past without events before it; when measurement started or reset
ExponentiallySmoothedAverage events; // Estimated number of events in the last `period`
ExponentiallySmoothedAverage duration; // Current duration of a period
ExponentiallySmoothedAverage events; // Estimated number of events in last `duration` seconds
size_t data_points = 0;
};
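A minimal usage sketch of the reworked interface, assuming the header above; the constants are illustrative, and the third constructor argument is the new `heating` parameter, under which the first few non-empty samples only reset the meter instead of skewing the average:

```cpp
#include <Common/EventRateMeter.h>
#include <cstdio>

int main()
{
    /// 2-second smoothing period, warm-up of 4 data points (heating).
    DB::EventRateMeter meter(/*now*/ 0.0, /*period*/ 2.0, /*heating*/ 4);

    for (int i = 1; i <= 40; ++i)
        meter.add(/*now*/ i * 0.1, /*count*/ 10.0); /// 100 events per second

    /// Once past the warm-up, the estimate approaches the true rate of 100.
    printf("rate = %f\n", meter.rate(/*now*/ 4.0));
}
```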

View File

@ -105,7 +105,7 @@ private:
bool write_progress_on_update = false;
EventRateMeter cpu_usage_meter{static_cast<double>(clock_gettime_ns()), 2'000'000'000 /*ns*/}; // average CPU utilization over the last 2 seconds
EventRateMeter cpu_usage_meter{static_cast<double>(clock_gettime_ns()), 2'000'000'000 /*ns*/, 4}; // average CPU utilization over the last 2 seconds, skipping the first 4 data points
HostToTimesMap hosts_data;
/// In case of all of the above:
/// - clickhouse-local

View File

@ -3,6 +3,8 @@
#include <Common/ErrorCodes.h>
#include <Common/Exception.h>
#include <Common/Priority.h>
#include <Common/EventRateMeter.h>
#include <Common/Stopwatch.h>
#include <base/defines.h>
#include <base/types.h>
@ -176,6 +178,14 @@ protected:
/// Postponed to be handled in scheduler thread, so it is intended to be called from outside.
void scheduleActivation();
/// Helper for introspection metrics
void incrementDequeued(ResourceCost cost)
{
dequeued_requests++;
dequeued_cost += cost;
throughput.add(static_cast<double>(clock_gettime_ns())/1e9, cost);
}
public:
EventQueue * const event_queue;
String basename;
@ -189,6 +199,10 @@ public:
std::atomic<ResourceCost> dequeued_cost{0};
std::atomic<ResourceCost> canceled_cost{0};
std::atomic<UInt64> busy_periods{0};
/// Average dequeued_cost per second
/// WARNING: Should only be accessed from the scheduler thread, so that locking is not required
EventRateMeter throughput{static_cast<double>(clock_gettime_ns())/1e9, 2, 1};
};
using SchedulerNodePtr = std::shared_ptr<ISchedulerNode>;
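For introspection, the new `throughput` meter can be read back on the scheduler thread. A hedged sketch follows: the helper and the include path are assumptions, while the `throughput` member and `rate()` come from the diffs above:

```cpp
#include <Common/Scheduler/ISchedulerNode.h>

/// Must run on the scheduler thread (see the WARNING above), which is
/// why no locking is needed around the EventRateMeter.
double currentThroughput(DB::ISchedulerNode & node)
{
    /// Average dequeued cost per second over the meter's period.
    return node.throughput.rate(static_cast<double>(clock_gettime_ns()) / 1e9);
}
```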

View File

@ -188,8 +188,7 @@ public:
if (request)
{
dequeued_requests++;
dequeued_cost += request->cost;
incrementDequeued(request->cost);
return {request, heap_size > 0};
}
}

View File

@ -59,8 +59,7 @@ public:
if (requests.empty())
busy_periods++;
queue_cost -= result->cost;
dequeued_requests++;
dequeued_cost += result->cost;
incrementDequeued(result->cost);
return {result, !requests.empty()};
}

View File

@ -122,8 +122,7 @@ public:
if (request)
{
dequeued_requests++;
dequeued_cost += request->cost;
incrementDequeued(request->cost);
return {request, !items.empty()};
}
}

View File

@ -81,8 +81,7 @@ public:
child_active = child_now_active;
if (!active())
busy_periods++;
dequeued_requests++;
dequeued_cost += request->cost;
incrementDequeued(request->cost);
return {request, active()};
}

View File

@ -89,8 +89,7 @@ public:
child_active = child_now_active;
if (!active())
busy_periods++;
dequeued_requests++;
dequeued_cost += request->cost;
incrementDequeued(request->cost);
return {request, active()};
}

View File

@ -162,8 +162,7 @@ public:
if (request == nullptr) // Possible in case of request cancel, just retry
continue;
dequeued_requests++;
dequeued_cost += request->cost;
incrementDequeued(request->cost);
return {request, current != nullptr};
}
}

View File

@ -2,6 +2,7 @@
#include <Common/TimerDescriptor.h>
#include <Common/Exception.h>
#include <Common/Epoll.h>
#include <Common/logger_useful.h>
#include <sys/timerfd.h>
@ -75,10 +76,22 @@ void TimerDescriptor::drain() const
/// or since the last successful read(2), then the buffer given to read(2) returns an unsigned 8-byte integer (uint64_t)
/// containing the number of expirations that have occurred.
/// (The returned value is in host byte order—that is, the native byte order for integers on the host machine.)
/// Due to a bug in the Linux kernel, reading from a timerfd in non-blocking mode can still block.
/// Avoid it with polling.
Epoll epoll;
epoll.add(timer_fd);
epoll_event event;
event.data.fd = -1;
size_t ready_count = epoll.getManyReady(1, &event, 0);
if (!ready_count)
return;
uint64_t buf;
while (true)
{
ssize_t res = ::read(timer_fd, &buf, sizeof(buf));
if (res < 0)
{
/// man timerfd_create:
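The pattern introduced at the top of this hunk, probing the timerfd for readiness with a zero-timeout epoll wait so that the subsequent `read()` cannot block despite the kernel bug, can be shown with the raw Linux syscalls. A self-contained, illustrative sketch (using the APIs directly rather than ClickHouse's `Epoll` wrapper):

```cpp
#include <sys/timerfd.h>
#include <sys/epoll.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>

int main()
{
    /// Error handling is omitted for brevity.
    int timer_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
    /// ... the timer would normally be armed here with timerfd_settime() ...

    int epoll_fd = epoll_create1(0);
    epoll_event ev{};
    ev.events = EPOLLIN;
    ev.data.fd = timer_fd;
    epoll_ctl(epoll_fd, EPOLL_CTL_ADD, timer_fd, &ev);

    /// A zero timeout makes epoll_wait return immediately; 0 ready events
    /// means the timerfd is not readable, so we skip the read() that the
    /// kernel bug could otherwise turn into a blocking call.
    epoll_event out{};
    if (epoll_wait(epoll_fd, &out, 1, /*timeout_ms*/ 0) > 0)
    {
        uint64_t expirations = 0;
        if (read(timer_fd, &expirations, sizeof(expirations)) == sizeof(expirations))
            printf("timer expired %llu time(s)\n", static_cast<unsigned long long>(expirations));
    }

    close(epoll_fd);
    close(timer_fd);
}
```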

View File

@ -0,0 +1,68 @@
#include <gtest/gtest.h>
#include <Common/EventRateMeter.h>
#include <cmath>
TEST(EventRateMeter, ExponentiallySmoothedAverage)
{
double target = 100.0;
// The test is only correct for timestep of 1 second because of
// how sum of weights is implemented inside `ExponentiallySmoothedAverage`
double time_step = 1.0;
for (double half_decay_time : { 0.1, 1.0, 10.0, 100.0})
{
DB::ExponentiallySmoothedAverage esa;
int steps = static_cast<int>(half_decay_time * 30 / time_step);
for (int i = 1; i <= steps; ++i)
esa.add(target * time_step, i * time_step, half_decay_time);
double measured = esa.get(half_decay_time);
ASSERT_LE(std::fabs(measured - target), 1e-5 * target);
}
}
TEST(EventRateMeter, ConstantRate)
{
double target = 100.0;
for (double period : {0.1, 1.0, 10.0})
{
for (double time_step : {0.001, 0.01, 0.1, 1.0})
{
DB::EventRateMeter erm(0.0, period);
int steps = static_cast<int>(period * 30 / time_step);
for (int i = 1; i <= steps; ++i)
erm.add(i * time_step, target * time_step);
double measured = erm.rate(steps * time_step);
// std::cout << "T=" << period << " dt=" << time_step << " measured=" << measured << std::endl;
ASSERT_LE(std::fabs(measured - target), 1e-5 * target);
}
}
}
TEST(EventRateMeter, PreciseStart)
{
double target = 100.0;
for (double period : {0.1, 1.0, 10.0})
{
for (double time_step : {0.001, 0.01, 0.1, 1.0})
{
DB::EventRateMeter erm(0.0, period);
int steps = static_cast<int>(period / time_step);
for (int i = 1; i <= steps; ++i)
{
erm.add(i * time_step, target * time_step);
double measured = erm.rate(i * time_step);
// std::cout << "T=" << period << " dt=" << time_step << " measured=" << measured << std::endl;
ASSERT_LE(std::fabs(measured - target), 1e-5 * target);
}
}
}
}

View File

@ -936,6 +936,7 @@ class IColumn;
M(UInt64, parallel_replicas_min_number_of_rows_per_replica, 0, "Limit the number of replicas used in a query to (estimated rows to read / min_number_of_rows_per_replica). The max is still limited by 'max_parallel_replicas'", 0) \
M(Bool, parallel_replicas_prefer_local_join, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN.", 0) \
M(UInt64, parallel_replicas_mark_segment_size, 128, "Parts virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure in what you're doing", 0) \
M(Bool, allow_archive_path_syntax, true, "File/S3 engines/table function will parse paths with '::' as '<archive> :: <file>' if archive has correct extension", 0) \
\
M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental inverted index.", 0) \
M(Bool, allow_experimental_full_text_index, false, "If it is set to true, allow to use experimental full-text index.", 0) \

View File

@ -76,6 +76,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"24.8",
{
{"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"},
{"allow_archive_path_syntax", true, true, "Added new setting to allow disabling archive path syntax."},
}
},
{"24.7",
@ -151,6 +152,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"},
{"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"},
{"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."},
{"allow_archive_path_syntax", false, true, "Added new setting to allow disabling archive path syntax."},
}
},
{"24.4",

View File

@ -103,7 +103,15 @@ static std::string getSortDescriptionDump(const SortDescription & description, c
WriteBufferFromOwnString buffer;
for (size_t i = 0; i < description.size(); ++i)
buffer << header_types[i]->getName() << ' ' << description[i].direction << ' ' << description[i].nulls_direction;
{
if (i != 0)
buffer << ", ";
buffer << "(type: " << header_types[i]->getName()
<< ", direction: " << description[i].direction
<< ", nulls_direction: " << description[i].nulls_direction
<< ")";
}
return buffer.str();
}

View File

@ -16,6 +16,7 @@
#include <Interpreters/Context.h>
#include <Interpreters/InterpreterCreateQuery.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/NormalizeSelectWithUnionQueryVisitor.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ParserCreateQuery.h>
@ -250,6 +251,8 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
convertMergeTreeToReplicatedIfNeeded(ast, qualified_name, file_name);
NormalizeSelectWithUnionQueryVisitor::Data data{local_context->getSettingsRef().union_default_mode};
NormalizeSelectWithUnionQueryVisitor{data}.visit(ast);
std::lock_guard lock{metadata.mutex};
metadata.parsed_tables[qualified_name] = ParsedTableMetadata{full_path.string(), ast};
metadata.total_dictionaries += create_query->is_dictionary;

View File

@ -24,7 +24,7 @@ namespace DB
static constexpr auto millisecond_multiplier = 1'000;
static constexpr auto microsecond_multiplier = 1'000'000;
static constexpr auto nanosecond_multiplier = 1'000'000'000;
static constexpr FormatSettings::DateTimeOverflowBehavior default_date_time_overflow_behavior = FormatSettings::DateTimeOverflowBehavior::Ignore;
@ -381,11 +381,13 @@ struct ToStartOfWeekImpl
static UInt16 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone)
{
return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode);
const int res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode);
return std::max(res, 0);
}
static UInt16 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone)
{
return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode);
const int res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode);
return std::max(res, 0);
}
static UInt16 execute(Int32 d, UInt8 week_mode, const DateLUTImpl & time_zone)
{
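A hedged, standalone illustration of why the clamp above matters (the constant is made up): `toFirstDayNumOfWeek` can yield a negative day number for dates before 1970-01-01, and returning it through `UInt16` unclamped would wrap around to a large bogus value.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main()
{
    int res = -3; /// e.g. a week start that falls before the epoch
    auto wrapped = static_cast<uint16_t>(res);              /// 65533: wrong
    auto clamped = static_cast<uint16_t>(std::max(res, 0)); /// 0: saturated
    printf("wrapped=%u clamped=%u\n", wrapped, clamped);
}
```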

View File

@ -2,6 +2,7 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <Backups/RestorerFromBackup.h>
#include <Core/Settings.h>
#include <Functions/FunctionFactory.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsStorage.h>
#include <Functions/UserDefined/UserDefinedExecutableFunctionFactory.h>
@ -9,6 +10,7 @@
#include <Functions/UserDefined/UserDefinedSQLObjectsBackup.h>
#include <Interpreters/Context.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/NormalizeSelectWithUnionQueryVisitor.h>
#include <Parsers/ASTCreateFunctionQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
@ -80,13 +82,15 @@ namespace
validateFunctionRecursiveness(*function_body, name);
}
ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query)
ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query, const ContextPtr & context)
{
auto ptr = create_function_query.clone();
auto & res = typeid_cast<ASTCreateFunctionQuery &>(*ptr);
res.if_not_exists = false;
res.or_replace = false;
FunctionNameNormalizer::visit(res.function_core.get());
NormalizeSelectWithUnionQueryVisitor::Data data{context->getSettingsRef().union_default_mode};
NormalizeSelectWithUnionQueryVisitor{data}.visit(res.function_core);
return ptr;
}
}
@ -125,7 +129,7 @@ void UserDefinedSQLFunctionFactory::checkCanBeUnregistered(const ContextPtr & co
bool UserDefinedSQLFunctionFactory::registerFunction(const ContextMutablePtr & context, const String & function_name, ASTPtr create_function_query, bool throw_if_exists, bool replace_if_exists)
{
checkCanBeRegistered(context, function_name, *create_function_query);
create_function_query = normalizeCreateFunctionQuery(*create_function_query);
create_function_query = normalizeCreateFunctionQuery(*create_function_query, context);
try
{

View File

@ -1,7 +1,7 @@
#include "Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.h"
#include "Functions/UserDefined/UserDefinedSQLFunctionFactory.h"
#include "Functions/UserDefined/UserDefinedSQLObjectType.h"
#include <Functions/UserDefined/UserDefinedSQLObjectType.h>
#include <Functions/UserDefined/UserDefinedSQLObjectsStorageBase.h>
#include <Common/StringUtils.h>
#include <Common/atomicRename.h>
@ -54,7 +54,7 @@ namespace
}
UserDefinedSQLObjectsDiskStorage::UserDefinedSQLObjectsDiskStorage(const ContextPtr & global_context_, const String & dir_path_)
: global_context(global_context_)
: UserDefinedSQLObjectsStorageBase(global_context_)
, dir_path{makeDirectoryPathCanonical(dir_path_)}
, log{getLogger("UserDefinedSQLObjectsLoaderFromDisk")}
{

View File

@ -42,7 +42,6 @@ private:
ASTPtr tryLoadObject(UserDefinedSQLObjectType object_type, const String & object_name, const String & file_path, bool check_file_exists);
String getFilePath(UserDefinedSQLObjectType object_type, const String & object_name) const;
ContextPtr global_context;
String dir_path;
LoggerPtr log;
std::atomic<bool> objects_loaded = false;

View File

@ -2,7 +2,10 @@
#include <boost/container/flat_set.hpp>
#include <Core/Settings.h>
#include <Interpreters/Context.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/NormalizeSelectWithUnionQueryVisitor.h>
#include <Parsers/ASTCreateFunctionQuery.h>
namespace DB
@ -17,18 +20,24 @@ namespace ErrorCodes
namespace
{
ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query)
ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query, const ContextPtr & context)
{
auto ptr = create_function_query.clone();
auto & res = typeid_cast<ASTCreateFunctionQuery &>(*ptr);
res.if_not_exists = false;
res.or_replace = false;
FunctionNameNormalizer::visit(res.function_core.get());
NormalizeSelectWithUnionQueryVisitor::Data data{context->getSettingsRef().union_default_mode};
NormalizeSelectWithUnionQueryVisitor{data}.visit(res.function_core);
return ptr;
}
}
UserDefinedSQLObjectsStorageBase::UserDefinedSQLObjectsStorageBase(ContextPtr global_context_)
: global_context(std::move(global_context_))
{}
ASTPtr UserDefinedSQLObjectsStorageBase::get(const String & object_name) const
{
std::lock_guard lock(mutex);
@ -148,7 +157,7 @@ void UserDefinedSQLObjectsStorageBase::setAllObjects(const std::vector<std::pair
{
std::unordered_map<String, ASTPtr> normalized_functions;
for (const auto & [function_name, create_query] : new_objects)
normalized_functions[function_name] = normalizeCreateFunctionQuery(*create_query);
normalized_functions[function_name] = normalizeCreateFunctionQuery(*create_query, global_context);
std::lock_guard lock(mutex);
object_name_to_create_object_map = std::move(normalized_functions);
@ -166,7 +175,7 @@ std::vector<std::pair<String, ASTPtr>> UserDefinedSQLObjectsStorageBase::getAllO
void UserDefinedSQLObjectsStorageBase::setObject(const String & object_name, const IAST & create_object_query)
{
std::lock_guard lock(mutex);
object_name_to_create_object_map[object_name] = normalizeCreateFunctionQuery(create_object_query);
object_name_to_create_object_map[object_name] = normalizeCreateFunctionQuery(create_object_query, global_context);
}
void UserDefinedSQLObjectsStorageBase::removeObject(const String & object_name)
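A note on the pattern above: every CREATE FUNCTION AST is normalized once when it enters the in-memory map, so all stored objects are in canonical form no matter which write path they arrived through. A minimal Python sketch of this normalize-on-write shape (class and function names here are illustrative, not the real API):
class ObjectStore:
    """Sketch: store only canonical forms, normalizing at every write path."""

    def __init__(self, normalize):
        self._normalize = normalize  # e.g. strips IF NOT EXISTS / OR REPLACE
        self._objects = {}

    def set_object(self, name, create_query):
        # Single-object write: normalize before storing.
        self._objects[name] = self._normalize(create_query)

    def set_all_objects(self, new_objects):
        # Bulk write: normalize everything first, then swap the whole map.
        self._objects = {name: self._normalize(q) for name, q in new_objects}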

View File

@ -4,6 +4,7 @@
#include <mutex>
#include <Functions/UserDefined/IUserDefinedSQLObjectsStorage.h>
#include <Interpreters/Context_fwd.h>
#include <Parsers/IAST.h>
@ -13,6 +14,7 @@ namespace DB
class UserDefinedSQLObjectsStorageBase : public IUserDefinedSQLObjectsStorage
{
public:
explicit UserDefinedSQLObjectsStorageBase(ContextPtr global_context_);
ASTPtr get(const String & object_name) const override;
ASTPtr tryGet(const String & object_name) const override;
@ -64,6 +66,8 @@ protected:
std::unordered_map<String, ASTPtr> object_name_to_create_object_map;
mutable std::recursive_mutex mutex;
ContextPtr global_context;
};
}

View File

@ -48,7 +48,7 @@ namespace
UserDefinedSQLObjectsZooKeeperStorage::UserDefinedSQLObjectsZooKeeperStorage(
const ContextPtr & global_context_, const String & zookeeper_path_)
: global_context{global_context_}
: UserDefinedSQLObjectsStorageBase(global_context_)
, zookeeper_getter{[global_context_]() { return global_context_->getZooKeeper(); }}
, zookeeper_path{zookeeper_path_}
, watch_queue{std::make_shared<ConcurrentBoundedQueue<std::pair<UserDefinedSQLObjectType, String>>>(std::numeric_limits<size_t>::max())}

View File

@ -68,8 +68,6 @@ private:
void refreshObjects(const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type);
void syncObjects(const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type);
ContextPtr global_context;
zkutil::ZooKeeperCachingGetter zookeeper_getter;
String zookeeper_path;
std::atomic<bool> objects_loaded = false;

View File

@ -0,0 +1,50 @@
#include <IO/Archives/ArchiveUtils.h>
#include <string_view>
#include <array>
namespace DB
{
namespace
{
using namespace std::literals;
constexpr std::array tar_extensions{".tar"sv, ".tar.gz"sv, ".tgz"sv, ".tar.zst"sv, ".tzst"sv, ".tar.xz"sv, ".tar.bz2"sv, ".tar.lzma"sv};
constexpr std::array zip_extensions{".zip"sv, ".zipx"sv};
constexpr std::array sevenz_extensions{".7z"sv};
bool hasSupportedExtension(std::string_view path, const auto & supported_extensions)
{
for (auto supported_extension : supported_extensions)
{
if (path.ends_with(supported_extension))
return true;
}
return false;
}
}
bool hasSupportedTarExtension(std::string_view path)
{
return hasSupportedExtension(path, tar_extensions);
}
bool hasSupportedZipExtension(std::string_view path)
{
return hasSupportedExtension(path, zip_extensions);
}
bool hasSupported7zExtension(std::string_view path)
{
return hasSupportedExtension(path, sevenz_extensions);
}
bool hasSupportedArchiveExtension(std::string_view path)
{
return hasSupportedTarExtension(path) || hasSupportedZipExtension(path) || hasSupported7zExtension(path);
}
}
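For reference, the dispatch above is a plain case-sensitive suffix match over small fixed lists. A minimal Python sketch of the same heuristic (extension lists copied from the arrays above; the function name is illustrative):
TAR_EXTENSIONS = (".tar", ".tar.gz", ".tgz", ".tar.zst", ".tzst",
                  ".tar.xz", ".tar.bz2", ".tar.lzma")
ZIP_EXTENSIONS = (".zip", ".zipx")
SEVENZ_EXTENSIONS = (".7z",)

def has_supported_archive_extension(path: str) -> bool:
    # str.endswith accepts a tuple of suffixes, mirroring hasSupportedExtension.
    return path.endswith(TAR_EXTENSIONS + ZIP_EXTENSIONS + SEVENZ_EXTENSIONS)

assert has_supported_archive_extension("backups/data.tar.zst")
assert not has_supported_archive_extension("backups/data.rar")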

View File

@ -10,3 +10,17 @@
#include <archive.h>
#include <archive_entry.h>
#endif
#include <string_view>
namespace DB
{
bool hasSupportedTarExtension(std::string_view path);
bool hasSupportedZipExtension(std::string_view path);
bool hasSupported7zExtension(std::string_view path);
bool hasSupportedArchiveExtension(std::string_view path);
}

View File

@ -1,6 +1,7 @@
#include <IO/Archives/LibArchiveReader.h>
#include <IO/Archives/ZipArchiveReader.h>
#include <IO/Archives/createArchiveReader.h>
#include <IO/Archives/ArchiveUtils.h>
#include <Common/Exception.h>
@ -12,7 +13,6 @@ extern const int CANNOT_UNPACK_ARCHIVE;
extern const int SUPPORT_IS_DISABLED;
}
std::shared_ptr<IArchiveReader> createArchiveReader(const String & path_to_archive)
{
return createArchiveReader(path_to_archive, {}, 0);
@ -24,11 +24,7 @@ std::shared_ptr<IArchiveReader> createArchiveReader(
[[maybe_unused]] const std::function<std::unique_ptr<SeekableReadBuffer>()> & archive_read_function,
[[maybe_unused]] size_t archive_size)
{
using namespace std::literals;
static constexpr std::array tar_extensions{
".tar"sv, ".tar.gz"sv, ".tgz"sv, ".tar.zst"sv, ".tzst"sv, ".tar.xz"sv, ".tar.bz2"sv, ".tar.lzma"sv};
if (path_to_archive.ends_with(".zip") || path_to_archive.ends_with(".zipx"))
if (hasSupportedZipExtension(path_to_archive))
{
#if USE_MINIZIP
return std::make_shared<ZipArchiveReader>(path_to_archive, archive_read_function, archive_size);
@ -36,8 +32,7 @@ std::shared_ptr<IArchiveReader> createArchiveReader(
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "minizip library is disabled");
#endif
}
else if (std::any_of(
tar_extensions.begin(), tar_extensions.end(), [&](const auto extension) { return path_to_archive.ends_with(extension); }))
else if (hasSupportedTarExtension(path_to_archive))
{
#if USE_LIBARCHIVE
return std::make_shared<TarArchiveReader>(path_to_archive, archive_read_function);
@ -45,7 +40,7 @@ std::shared_ptr<IArchiveReader> createArchiveReader(
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "libarchive library is disabled");
#endif
}
else if (path_to_archive.ends_with(".7z"))
else if (hasSupported7zExtension(path_to_archive))
{
#if USE_LIBARCHIVE
return std::make_shared<SevenZipArchiveReader>(path_to_archive);

View File

@ -1,3 +1,4 @@
#include <IO/Archives/ArchiveUtils.h>
#include <IO/Archives/LibArchiveWriter.h>
#include <IO/Archives/TarArchiveWriter.h>
#include <IO/Archives/ZipArchiveWriter.h>
@ -24,10 +25,7 @@ std::shared_ptr<IArchiveWriter> createArchiveWriter(const String & path_to_archi
std::shared_ptr<IArchiveWriter>
createArchiveWriter(const String & path_to_archive, [[maybe_unused]] std::unique_ptr<WriteBuffer> archive_write_buffer)
{
using namespace std::literals;
static constexpr std::array tar_extensions{
".tar"sv, ".tar.gz"sv, ".tgz"sv, ".tar.bz2"sv, ".tar.lzma"sv, ".tar.zst"sv, ".tzst"sv, ".tar.xz"sv};
if (path_to_archive.ends_with(".zip") || path_to_archive.ends_with(".zipx"))
if (hasSupportedZipExtension(path_to_archive))
{
#if USE_MINIZIP
return std::make_shared<ZipArchiveWriter>(path_to_archive, std::move(archive_write_buffer));
@ -35,8 +33,7 @@ createArchiveWriter(const String & path_to_archive, [[maybe_unused]] std::unique
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "minizip library is disabled");
#endif
}
else if (std::any_of(
tar_extensions.begin(), tar_extensions.end(), [&](const auto extension) { return path_to_archive.ends_with(extension); }))
else if (hasSupportedTarExtension(path_to_archive))
{
#if USE_LIBARCHIVE
return std::make_shared<TarArchiveWriter>(path_to_archive, std::move(archive_write_buffer));

View File

@ -24,6 +24,7 @@
#include <Common/assert_cast.h>
#include <Common/logger_useful.h>
#include <Common/CurrentMetrics.h>
#include <Common/ProxyConfigurationResolverProvider.h>
#include <Core/Settings.h>
@ -43,6 +44,11 @@ namespace ProfileEvents
extern const Event TinyS3Clients;
}
namespace CurrentMetrics
{
extern const Metric S3DiskNoKeyErrors;
}
namespace DB
{
@ -381,7 +387,7 @@ Model::HeadObjectOutcome Client::HeadObject(HeadObjectRequest & request) const
/// The next call is NOT a recursive call
/// This is a virtual call Aws::S3::S3Client::HeadObject(const Model::HeadObjectRequest&)
return enrichErrorMessage(
return processRequestResult(
HeadObject(static_cast<const Model::HeadObjectRequest&>(request)));
}
@ -402,7 +408,7 @@ Model::ListObjectsOutcome Client::ListObjects(ListObjectsRequest & request) cons
Model::GetObjectOutcome Client::GetObject(GetObjectRequest & request) const
{
return enrichErrorMessage(
return processRequestResult(
doRequest(request, [this](const Model::GetObjectRequest & req) { return GetObject(req); }));
}
@ -689,11 +695,14 @@ Client::doRequestWithRetryNetworkErrors(RequestType & request, RequestFn request
}
template <typename RequestResult>
RequestResult Client::enrichErrorMessage(RequestResult && outcome) const
RequestResult Client::processRequestResult(RequestResult && outcome) const
{
if (outcome.IsSuccess() || !isClientForDisk())
return std::forward<RequestResult>(outcome);
if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY)
CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors);
String enriched_message = fmt::format(
"{} {}",
outcome.GetError().GetMessage(),

View File

@ -271,7 +271,7 @@ private:
void insertRegionOverride(const std::string & bucket, const std::string & region) const;
template <typename RequestResult>
RequestResult enrichErrorMessage(RequestResult && outcome) const;
RequestResult processRequestResult(RequestResult && outcome) const;
String initial_endpoint;
std::shared_ptr<Aws::Auth::AWSCredentialsProvider> credentials_provider;

View File

@ -6,6 +6,7 @@
#include <Common/Exception.h>
#include <Common/quoteString.h>
#include <Common/re2.h>
#include <IO/Archives/ArchiveUtils.h>
#include <boost/algorithm/string/case_conv.hpp>
@ -29,7 +30,7 @@ namespace ErrorCodes
namespace S3
{
URI::URI(const std::string & uri_)
URI::URI(const std::string & uri_, bool allow_archive_path_syntax)
{
/// Case when the bucket name is represented in the domain name of the S3 URL.
/// E.g. (https://bucket-name.s3.region.amazonaws.com/key)
@ -54,10 +55,11 @@ URI::URI(const std::string & uri_)
static constexpr auto OSS = "OSS";
static constexpr auto EOS = "EOS";
if (containsArchive(uri_))
std::tie(uri_str, archive_pattern) = getPathToArchiveAndArchivePattern(uri_);
if (allow_archive_path_syntax)
std::tie(uri_str, archive_pattern) = getURIAndArchivePattern(uri_);
else
uri_str = uri_;
uri = Poco::URI(uri_str);
std::unordered_map<std::string, std::string> mapper;
@ -167,32 +169,37 @@ void URI::validateBucket(const String & bucket, const Poco::URI & uri)
!uri.empty() ? " (" + uri.toString() + ")" : "");
}
bool URI::containsArchive(const std::string & source)
std::pair<std::string, std::optional<std::string>> URI::getURIAndArchivePattern(const std::string & source)
{
size_t pos = source.find("::");
return (pos != std::string::npos);
}
if (pos == String::npos)
return {source, std::nullopt};
std::pair<std::string, std::string> URI::getPathToArchiveAndArchivePattern(const std::string & source)
{
size_t pos = source.find("::");
assert(pos != std::string::npos);
std::string_view path_to_archive_view = std::string_view{source}.substr(0, pos);
bool contains_spaces_around_operator = false;
while (path_to_archive_view.ends_with(' '))
{
contains_spaces_around_operator = true;
path_to_archive_view.remove_suffix(1);
}
std::string path_to_archive = source.substr(0, pos);
while ((!path_to_archive.empty()) && path_to_archive.ends_with(' '))
path_to_archive.pop_back();
std::string_view archive_pattern_view = std::string_view{source}.substr(pos + 2);
while (archive_pattern_view.starts_with(' '))
{
contains_spaces_around_operator = true;
archive_pattern_view.remove_prefix(1);
}
if (path_to_archive.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path to archive is empty");
/// The first part can be an archive only if one of the following is true:
/// - it has a supported archive extension
/// - there are spaces before or after :: (a URI cannot contain spaces)
/// - it contains characters that could form a glob expression
if (archive_pattern_view.empty() || path_to_archive_view.empty()
|| (!contains_spaces_around_operator && !hasSupportedArchiveExtension(path_to_archive_view)
&& path_to_archive_view.find_first_of("*?{") == std::string_view::npos))
return {source, std::nullopt};
std::string_view path_in_archive_view = std::string_view{source}.substr(pos + 2);
while (path_in_archive_view.front() == ' ')
path_in_archive_view.remove_prefix(1);
if (path_in_archive_view.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filename is empty");
return {path_to_archive, std::string{path_in_archive_view}};
return std::pair{std::string{path_to_archive_view}, std::string{archive_pattern_view}};
}
}
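The decision above boils down to: split on the first "::", trim spaces, and treat the left part as an archive path only if the split looks intentional. A hedged Python sketch of that rule (reusing has_supported_archive_extension from the sketch above; names are illustrative):
from typing import Optional, Tuple

def split_uri_and_archive_pattern(source: str) -> Tuple[str, Optional[str]]:
    pos = source.find("::")
    if pos == -1:
        return source, None
    path, pattern = source[:pos], source[pos + 2:]
    # Spaces around "::" are a strong signal: a URI cannot contain spaces.
    had_spaces = path != path.rstrip(" ") or pattern != pattern.lstrip(" ")
    path, pattern = path.rstrip(" "), pattern.lstrip(" ")
    looks_like_archive = (
        had_spaces
        or has_supported_archive_extension(path)
        or any(c in path for c in "*?{")  # glob characters
    )
    if not path or not pattern or not looks_like_archive:
        return source, None  # treat the whole string as a plain URI
    return path, pattern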

View File

@ -36,14 +36,13 @@ struct URI
bool is_virtual_hosted_style;
URI() = default;
explicit URI(const std::string & uri_);
explicit URI(const std::string & uri_, bool allow_archive_path_syntax = false);
void addRegionToURI(const std::string & region);
static void validateBucket(const std::string & bucket, const Poco::URI & uri);
private:
bool containsArchive(const std::string & source);
std::pair<std::string, std::string> getPathToArchiveAndArchivePattern(const std::string & source);
std::pair<std::string, std::optional<std::string>> getURIAndArchivePattern(const std::string & source);
};
}

View File

@ -4,8 +4,6 @@
#include <Interpreters/InDepthNodeVisitor.h>
#include <Parsers/IAST_fwd.h>
#include <unordered_set>
namespace DB
{

View File

@ -46,7 +46,7 @@ public:
auto column_source_node = column_node->getColumnSource();
auto column_source_node_type = column_source_node->getNodeType();
if (column_source_node_type == QueryTreeNodeType::LAMBDA)
if (column_source_node_type == QueryTreeNodeType::LAMBDA || column_source_node_type == QueryTreeNodeType::INTERPOLATE)
return;
/// JOIN using expression

View File

@ -744,6 +744,8 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan,
}
else
{
ActionsDAG rename_dag;
for (auto & interpolate_node : interpolate_list_nodes)
{
auto & interpolate_node_typed = interpolate_node->as<InterpolateNode &>();
@ -772,8 +774,28 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan,
const auto * alias_node = &interpolate_actions_dag.addAlias(*interpolate_expression, expression_to_interpolate_name);
interpolate_actions_dag.getOutputs().push_back(alias_node);
/// Here we fix INTERPOLATE by a constant expression.
/// Example from 02336_sort_optimization_with_fill:
///
/// SELECT 5 AS x, 'Hello' AS s ORDER BY x WITH FILL FROM 1 TO 10 INTERPOLATE (s AS s||'A')
///
/// For this query, INTERPOLATE_EXPRESSION would be: s AS concat(s, 'A'),
/// so that interpolate_actions_dag would have INPUT `s`.
///
/// However, INPUT `s` does not exist. Instead, we have a constant with execution name 'Hello'_String.
/// To fix this, we prepend a rename: 'Hello'_String -> s
if (const auto * constant_node = interpolate_node_typed.getExpression()->as<const ConstantNode>())
{
const auto * node = &rename_dag.addInput(alias_node->result_name, alias_node->result_type);
node = &rename_dag.addAlias(*node, interpolate_node_typed.getExpressionName());
rename_dag.getOutputs().push_back(node);
}
}
if (!rename_dag.getOutputs().empty())
interpolate_actions_dag = ActionsDAG::merge(std::move(rename_dag), std::move(interpolate_actions_dag));
interpolate_actions_dag.removeUnusedActions();
}

View File

@ -491,7 +491,16 @@ public:
{
auto it = node_name_to_node.find(node_name);
if (it != node_name_to_node.end())
return it->second;
{
/// It is possible that ActionsDAG already has an input with the same name as the constant.
/// In this case, prefer the constant to the input.
/// Constants affect the function return type, which should be consistent with the QueryTree.
/// Query example:
/// SELECT materialize(toLowCardinality('b')) || 'a' FROM remote('127.0.0.{1,2}', system, one) GROUP BY 'a'
bool materialized_input = it->second->type == ActionsDAG::ActionType::INPUT && !it->second->column;
if (!materialized_input)
return it->second;
}
const auto * node = &actions_dag.addColumn(column);
node_name_to_node[node->result_name] = node;

View File

@ -462,6 +462,9 @@ SortAnalysisResult analyzeSort(const QueryNode & query_node,
for (auto & interpolate_node : interpolate_list_node.getNodes())
{
auto & interpolate_node_typed = interpolate_node->as<InterpolateNode &>();
if (interpolate_node_typed.getExpression()->getNodeType() == QueryTreeNodeType::CONSTANT)
continue;
interpolate_actions_visitor.visit(interpolate_actions_dag, interpolate_node_typed.getInterpolateExpression());
}

View File

@ -2,6 +2,7 @@
#include <Processors/Transforms/FillingTransform.h>
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <IO/Operators.h>
#include <Interpreters/ExpressionActions.h>
#include <Common/JSONBuilder.h>
namespace DB
@ -58,14 +59,25 @@ void FillingStep::transformPipeline(QueryPipelineBuilder & pipeline, const Build
void FillingStep::describeActions(FormatSettings & settings) const
{
settings.out << String(settings.offset, ' ');
String prefix(settings.offset, settings.indent_char);
settings.out << prefix;
dumpSortDescription(sort_description, settings.out);
settings.out << '\n';
if (interpolate_description)
{
auto expression = std::make_shared<ExpressionActions>(interpolate_description->actions.clone());
expression->describeActions(settings.out, prefix);
}
}
void FillingStep::describeActions(JSONBuilder::JSONMap & map) const
{
map.add("Sort Description", explainSortDescription(sort_description));
if (interpolate_description)
{
auto expression = std::make_shared<ExpressionActions>(interpolate_description->actions.clone());
map.add("Expression", expression->toTree());
}
}
void FillingStep::updateOutputStream()

View File

@ -142,14 +142,14 @@ ObjectStoragePtr StorageS3Configuration::createObjectStorage(ContextPtr context,
void StorageS3Configuration::fromNamedCollection(const NamedCollection & collection, ContextPtr context)
{
const auto settings = context->getSettingsRef();
const auto & settings = context->getSettingsRef();
validateNamedCollection(collection, required_configuration_keys, optional_configuration_keys);
auto filename = collection.getOrDefault<String>("filename", "");
if (!filename.empty())
url = S3::URI(std::filesystem::path(collection.get<String>("url")) / filename);
url = S3::URI(std::filesystem::path(collection.get<String>("url")) / filename, settings.allow_archive_path_syntax);
else
url = S3::URI(collection.get<String>("url"));
url = S3::URI(collection.get<String>("url"), settings.allow_archive_path_syntax);
auth_settings.access_key_id = collection.getOrDefault<String>("access_key_id", "");
auth_settings.secret_access_key = collection.getOrDefault<String>("secret_access_key", "");
@ -330,7 +330,7 @@ void StorageS3Configuration::fromAST(ASTs & args, ContextPtr context, bool with_
}
/// This argument is always the first
url = S3::URI(checkAndGetLiteralArgument<String>(args[0], "url"));
url = S3::URI(checkAndGetLiteralArgument<String>(args[0], "url"), context->getSettingsRef().allow_archive_path_syntax);
if (engine_args_to_idx.contains("format"))
{

View File

@ -25,6 +25,7 @@
#include <IO/WriteHelpers.h>
#include <IO/Archives/createArchiveReader.h>
#include <IO/Archives/IArchiveReader.h>
#include <IO/Archives/ArchiveUtils.h>
#include <IO/PeekableReadBuffer.h>
#include <IO/AsynchronousReadBufferFromFile.h>
#include <Disks/IO/IOUringReader.h>
@ -2207,7 +2208,11 @@ void registerStorageFile(StorageFactory & factory)
else if (type == Field::Types::UInt64)
source_fd = static_cast<int>(literal->value.get<UInt64>());
else if (type == Field::Types::String)
StorageFile::parseFileSource(literal->value.get<String>(), source_path, storage_args.path_to_archive);
StorageFile::parseFileSource(
literal->value.get<String>(),
source_path,
storage_args.path_to_archive,
factory_args.getLocalContext()->getSettingsRef().allow_archive_path_syntax);
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second argument must be path or file descriptor");
}
@ -2234,8 +2239,14 @@ SchemaCache & StorageFile::getSchemaCache(const ContextPtr & context)
return schema_cache;
}
void StorageFile::parseFileSource(String source, String & filename, String & path_to_archive)
void StorageFile::parseFileSource(String source, String & filename, String & path_to_archive, bool allow_archive_path_syntax)
{
if (!allow_archive_path_syntax)
{
filename = std::move(source);
return;
}
size_t pos = source.find("::");
if (pos == String::npos)
{
@ -2247,18 +2258,21 @@ void StorageFile::parseFileSource(String source, String & filename, String & pat
while (path_to_archive_view.ends_with(' '))
path_to_archive_view.remove_suffix(1);
if (path_to_archive_view.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path to archive is empty");
path_to_archive = path_to_archive_view;
std::string_view filename_view = std::string_view{source}.substr(pos + 2);
while (filename_view.front() == ' ')
while (filename_view.starts_with(' '))
filename_view.remove_prefix(1);
if (filename_view.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filename is empty");
/// The first part can be an archive only if one of the following is true:
/// - it has a supported archive extension
/// - it contains characters that could form a glob expression
if (filename_view.empty() || path_to_archive_view.empty()
|| (!hasSupportedArchiveExtension(path_to_archive_view) && path_to_archive_view.find_first_of("*?{") == std::string_view::npos))
{
filename = std::move(source);
return;
}
path_to_archive = path_to_archive_view;
filename = filename_view;
}
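parseFileSource applies the same splitting idea, but it is gated by the allow_archive_path_syntax setting and, unlike the S3 URI case, does not treat spaces around "::" as an archive signal. A small illustrative sketch (again assuming the has_supported_archive_extension helper from above):
from typing import Optional, Tuple

def parse_file_source(source: str, allow_archive_path_syntax: bool) -> Tuple[str, Optional[str]]:
    """Return (filename, path_to_archive or None); a sketch, not the real API."""
    if not allow_archive_path_syntax:
        return source, None  # "::" is never special when the setting is off
    pos = source.find("::")
    if pos == -1:
        return source, None
    archive = source[:pos].rstrip(" ")
    filename = source[pos + 2:].lstrip(" ")
    # Only a supported extension or glob characters mark the left part as an archive.
    if not archive or not filename or (
            not has_supported_archive_extension(archive)
            and not any(c in archive for c in "*?{")):
        return source, None
    return filename, archive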

View File

@ -128,7 +128,7 @@ public:
static SchemaCache & getSchemaCache(const ContextPtr & context);
static void parseFileSource(String source, String & filename, String & path_to_archive);
static void parseFileSource(String source, String & filename, String & path_to_archive, bool allow_archive_path_syntax);
static ArchiveInfo getArchiveInfo(
const std::string & path_to_archive,

View File

@ -31,6 +31,7 @@ ColumnsDescription StorageSystemScheduler::getColumnsDescription()
{"dequeued_requests", std::make_shared<DataTypeUInt64>(), "The total number of resource requests dequeued from this node."},
{"canceled_requests", std::make_shared<DataTypeUInt64>(), "The total number of resource requests canceled from this node."},
{"dequeued_cost", std::make_shared<DataTypeInt64>(), "The sum of costs (e.g. size in bytes) of all requests dequeued from this node."},
{"throughput", std::make_shared<DataTypeFloat64>(), "Current average throughput (dequeued cost per second)."},
{"canceled_cost", std::make_shared<DataTypeInt64>(), "The sum of costs (e.g. size in bytes) of all requests canceled from this node."},
{"busy_periods", std::make_shared<DataTypeUInt64>(), "The total number of deactivations of this node."},
{"vruntime", std::make_shared<DataTypeNullable>(std::make_shared<DataTypeFloat64>()),
@ -96,6 +97,7 @@ void StorageSystemScheduler::fillData(MutableColumns & res_columns, ContextPtr c
res_columns[i++]->insert(node->dequeued_requests.load());
res_columns[i++]->insert(node->canceled_requests.load());
res_columns[i++]->insert(node->dequeued_cost.load());
res_columns[i++]->insert(node->throughput.rate(static_cast<double>(clock_gettime_ns())/1e9));
res_columns[i++]->insert(node->canceled_cost.load());
res_columns[i++]->insert(node->busy_periods.load());
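The new throughput column is computed as node->throughput.rate(now), with now taken from clock_gettime_ns()/1e9, i.e. dequeued cost per second at the moment of the query. A minimal Python sketch of one way such a meter can work, assuming a simple sliding window (the server-side smoothing may differ):
import collections

class ThroughputMeter:
    """Sliding-window rate meter: sum of costs in the last window, per second.
    A sketch only - the real meter may use exponential smoothing instead."""

    def __init__(self, window_s: float = 60.0):
        self.window = window_s
        self.events = collections.deque()  # (timestamp, cost)
        self.total = 0.0

    def add(self, cost: float, now: float) -> None:
        self.events.append((now, cost))
        self.total += cost
        self._evict(now)

    def _evict(self, now: float) -> None:
        while self.events and now - self.events[0][0] > self.window:
            _, cost = self.events.popleft()
            self.total -= cost

    def rate(self, now: float) -> float:
        self._evict(now)
        return self.total / self.window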

View File

@ -26,7 +26,7 @@ void TableFunctionFile::parseFirstArguments(const ASTPtr & arg, const ContextPtr
if (context->getApplicationType() != Context::ApplicationType::LOCAL)
{
ITableFunctionFileLike::parseFirstArguments(arg, context);
StorageFile::parseFileSource(std::move(filename), filename, path_to_archive);
StorageFile::parseFileSource(std::move(filename), filename, path_to_archive, context->getSettingsRef().allow_archive_path_syntax);
return;
}
@ -42,7 +42,8 @@ void TableFunctionFile::parseFirstArguments(const ASTPtr & arg, const ContextPtr
else if (filename == "stderr")
fd = STDERR_FILENO;
else
StorageFile::parseFileSource(std::move(filename), filename, path_to_archive);
StorageFile::parseFileSource(
std::move(filename), filename, path_to_archive, context->getSettingsRef().allow_archive_path_syntax);
}
else if (type == Field::Types::Int64 || type == Field::Types::UInt64)
{
@ -63,9 +64,12 @@ std::optional<String> TableFunctionFile::tryGetFormatFromFirstArgument()
return FormatFactory::instance().tryGetFormatFromFileName(filename);
}
StoragePtr TableFunctionFile::getStorage(const String & source,
const String & format_, const ColumnsDescription & columns,
ContextPtr global_context, const std::string & table_name,
StoragePtr TableFunctionFile::getStorage(
const String & source,
const String & format_,
const ColumnsDescription & columns,
ContextPtr global_context,
const std::string & table_name,
const std::string & compression_method_) const
{
// For `file` table function, we are going to use format settings from the

View File

@ -13,8 +13,8 @@ from ci_utils import WithIter, Shell
class MountPointApp(metaclass=WithIter):
RCLONE = "rclone"
S3FS = "s3fs"
GEESEFS = "geesefs"
class R2MountPoint:
@ -30,9 +30,6 @@ class R2MountPoint:
DEBUG = True
# enable cache for mountpoint
CACHE_ENABLED = False
# TODO: which mode is better: minimal/writes/full/off
_RCLONE_CACHE_MODE = "minimal"
UMASK = "0000"
def __init__(self, app: str, dry_run: bool) -> None:
assert app in MountPointApp
@ -52,20 +49,26 @@ class R2MountPoint:
if self.CACHE_ENABLED
else ""
)
if not dry_run:
self.aux_mount_options += (
"-o passwd_file /home/ubuntu/.passwd-s3fs_packages "
)
# without -o nomultipart there are errors like "Error 5 writing to /home/ubuntu/***.deb: Input/output error"
self.mount_cmd = f"s3fs {self.bucket_name} {self.MOUNT_POINT} -o url={self.API_ENDPOINT} -o use_path_request_style -o umask=0000 -o nomultipart -o logfile={self.LOG_FILE} {self.aux_mount_options}"
elif self.app == MountPointApp.RCLONE:
# run rclone mount process asynchronously, otherwise subprocess.run(daemonized command) will not return
self.cache_dir = "/home/ubuntu/rclone_cache"
self.aux_mount_options += "--no-modtime " if self.NOMODTIME else ""
self.aux_mount_options += "-v " if self.DEBUG else "" # -vv too verbose
elif self.app == MountPointApp.GEESEFS:
self.cache_dir = "/home/ubuntu/geesefs_cache"
self.aux_mount_options += (
f"--vfs-cache-mode {self._RCLONE_CACHE_MODE} --vfs-cache-max-size {self._CACHE_MAX_SIZE_GB}G"
if self.CACHE_ENABLED
else "--vfs-cache-mode off"
f" --cache={self.cache_dir} " if self.CACHE_ENABLED else ""
)
# Use --no-modtime to try to avoid: ERROR : rpm/lts/clickhouse-client-24.3.6.5.x86_64.rpm: Failed to apply pending mod time
self.mount_cmd = f"rclone mount remote:{self.bucket_name} {self.MOUNT_POINT} --daemon --cache-dir {self.cache_dir} --umask 0000 --log-file {self.LOG_FILE} {self.aux_mount_options}"
if not dry_run:
self.aux_mount_options += f" --shared-config=/home/ubuntu/.r2_auth "
else:
self.aux_mount_options += (
f" --shared-config=/home/ubuntu/.r2_auth_test "
)
if self.DEBUG:
self.aux_mount_options += " --debug_s3 "
self.mount_cmd = f"geesefs --endpoint={self.API_ENDPOINT} --cheap --memory-limit=1000 --gc-interval=100 --max-flushers=10 --max-parallel-parts=1 --max-parallel-copy=10 --log-file={self.LOG_FILE} {self.aux_mount_options} {self.bucket_name} {self.MOUNT_POINT}"
else:
assert False
@ -79,22 +82,17 @@ class R2MountPoint:
)
_TEST_MOUNT_CMD = f"mount | grep -q {self.MOUNT_POINT}"
Shell.run(_CLEAN_LOG_FILE_CMD)
Shell.run(_UNMOUNT_CMD)
Shell.run(_MKDIR_CMD)
Shell.run(_MKDIR_FOR_CACHE)
if self.app == MountPointApp.S3FS:
Shell.run(self.mount_cmd, check=True)
else:
# didn't manage to use simple run() and without blocking or failure
Shell.run_as_daemon(self.mount_cmd)
Shell.check(_CLEAN_LOG_FILE_CMD, verbose=True)
Shell.check(_UNMOUNT_CMD, verbose=True)
Shell.check(_MKDIR_CMD, verbose=True)
Shell.check(_MKDIR_FOR_CACHE, verbose=True)
Shell.check(self.mount_cmd, strict=True, verbose=True)
time.sleep(3)
Shell.run(_TEST_MOUNT_CMD, check=True)
Shell.check(_TEST_MOUNT_CMD, strict=True, verbose=True)
@classmethod
def teardown(cls):
print(f"Unmount [{cls.MOUNT_POINT}]")
Shell.run(f"umount {cls.MOUNT_POINT}")
Shell.check(f"umount {cls.MOUNT_POINT}", verbose=True)
class RepoCodenames(metaclass=WithIter):
@ -129,10 +127,9 @@ class DebianArtifactory:
]
REPREPRO_CMD_PREFIX = f"reprepro --basedir {R2MountPoint.MOUNT_POINT}/configs/deb --outdir {R2MountPoint.MOUNT_POINT}/deb --verbose"
cmd = f"{REPREPRO_CMD_PREFIX} includedeb {self.codename} {' '.join(paths)}"
print("Running export command:")
print(f" {cmd}")
Shell.run(cmd, check=True)
Shell.run("sync")
print("Running export commands:")
Shell.check(cmd, strict=True, verbose=True)
Shell.check("sync")
if self.codename == RepoCodenames.LTS:
packages_with_version = [
@ -144,18 +141,24 @@ class DebianArtifactory:
cmd = f"{REPREPRO_CMD_PREFIX} copy {RepoCodenames.STABLE} {RepoCodenames.LTS} {' '.join(packages_with_version)}"
print("Running copy command:")
print(f" {cmd}")
Shell.run(cmd, check=True)
Shell.run("sync")
Shell.check(cmd, strict=True)
Shell.check("sync")
def test_packages(self):
Shell.run("docker pull ubuntu:latest")
Shell.check("docker pull ubuntu:latest", strict=True)
print(f"Test packages installation, version [{self.version}]")
debian_command = f"echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-common-static={self.version} clickhouse-client={self.version}"
cmd = f'docker run --rm ubuntu:latest bash -c "apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; {debian_command}"'
print("Running test command:")
print(f" {cmd}")
Shell.run(cmd, check=True)
self.release_info.debian_command = debian_command
assert Shell.check(cmd)
print(f"Test packages installation, version [latest]")
debian_command_2 = f"echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-common-static clickhouse-client"
cmd = f'docker run --rm ubuntu:latest bash -c "apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; {debian_command_2}"'
print("Running test command:")
print(f" {cmd}")
assert Shell.check(cmd)
self.release_info.debian = debian_command
self.release_info.dump()
@ -204,34 +207,40 @@ class RpmArtifactory:
for package in paths:
_copy_if_not_exists(Path(package), dest_dir)
# switching between different fuse providers invalidates --update option (apparently some fuse(s) can mess around with mtime)
# add --skip-stat to skip mtime check
commands = (
f"createrepo_c --local-sqlite --workers=2 --update --verbose {dest_dir}",
f"createrepo_c --local-sqlite --workers=2 --update --skip-stat --verbose {dest_dir}",
f"gpg --sign-with {self._SIGN_KEY} --detach-sign --batch --yes --armor {dest_dir / 'repodata' / 'repomd.xml'}",
)
print(f"Exporting RPM packages into [{codename}]")
for command in commands:
print("Running command:")
print(f" {command}")
Shell.run(command, check=True)
Shell.check(command, strict=True, verbose=True)
update_public_key = f"gpg --armor --export {self._SIGN_KEY}"
pub_key_path = dest_dir / "repodata" / "repomd.xml.key"
print("Updating repomd.xml.key")
pub_key_path.write_text(Shell.run(update_public_key, check=True))
pub_key_path.write_text(Shell.get_output_or_raise(update_public_key))
if codename == RepoCodenames.LTS:
self.export_packages(RepoCodenames.STABLE)
Shell.run("sync")
Shell.check("sync")
def test_packages(self):
Shell.run("docker pull fedora:latest")
Shell.check("docker pull fedora:latest", strict=True)
print(f"Test package installation, version [{self.version}]")
rpm_command = f"dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1"
cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command}"'
print("Running test command:")
print(f" {cmd}")
Shell.run(cmd, check=True)
self.release_info.rpm_command = rpm_command
assert Shell.check(cmd)
print(f"Test package installation, version [latest]")
rpm_command_2 = f"dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client"
cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command_2}"'
print("Running test command:")
print(f" {cmd}")
assert Shell.check(cmd)
self.release_info.rpm = rpm_command
self.release_info.dump()
@ -271,27 +280,31 @@ class TgzArtifactory:
if codename == RepoCodenames.LTS:
self.export_packages(RepoCodenames.STABLE)
Shell.run("sync")
Shell.check("sync")
def test_packages(self):
tgz_file = "/tmp/tmp.tgz"
tgz_sha_file = "/tmp/tmp.tgz.sha512"
cmd = f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz"
Shell.run(
Shell.check(
cmd,
check=True,
strict=True,
verbose=True,
)
Shell.run(
Shell.check(
f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512",
check=True,
strict=True,
verbose=True,
)
expected_checksum = Shell.get_output_or_raise(f"cut -d ' ' -f 1 {tgz_sha_file}")
actual_checksum = Shell.get_output_or_raise(
f"sha512sum {tgz_file} | cut -d ' ' -f 1"
)
expected_checksum = Shell.run(f"cut -d ' ' -f 1 {tgz_sha_file}", check=True)
actual_checksum = Shell.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1")
assert (
expected_checksum == actual_checksum
), f"[{actual_checksum} != {expected_checksum}]"
Shell.run("rm /tmp/tmp.tgz*")
self.release_info.tgz_command = cmd
Shell.check("rm /tmp/tmp.tgz*", verbose=True)
self.release_info.tgz = cmd
self.release_info.dump()
@ -342,11 +355,11 @@ if __name__ == "__main__":
args = parse_args()
"""
Use S3FS. RCLONE has some errors with r2 remote which I didn't figure out how to resolve:
ERROR : IO error: NotImplemented: versionId not implemented
Failed to copy: NotImplemented: versionId not implemented
S3FS - very slow with a big repo
RCLONE - FUSE mount had many different errors with the r2 remote and was completely removed
GEESEFS - the current choice
"""
mp = R2MountPoint(MountPointApp.S3FS, dry_run=args.dry_run)
mp = R2MountPoint(MountPointApp.GEESEFS, dry_run=args.dry_run)
if args.export_debian:
with ReleaseContextManager(
release_progress=ReleaseProgress.EXPORT_DEB

View File

@ -85,7 +85,7 @@ class AutoReleaseInfo:
def _prepare(token):
assert len(token) > 10
os.environ["GH_TOKEN"] = token
Shell.run("gh auth status", check=True)
Shell.check("gh auth status")
gh = GitHub(token)
prs = gh.get_release_pulls(GITHUB_REPOSITORY)
@ -106,9 +106,8 @@ def _prepare(token):
latest_release_tag_ref = refs[-1]
latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha)
commits = Shell.run(
commits = Shell.get_output_or_raise(
f"git rev-list --first-parent {latest_release_tag.tag}..origin/{pr.head.ref}",
check=True,
).split("\n")
commit_num = len(commits)
print(
@ -128,15 +127,13 @@ def _prepare(token):
)
commit_num -= 1
is_completed = CI.GHActions.check_wf_completed(
token=token, commit_sha=commit
)
is_completed = CI.GH.check_wf_completed(token=token, commit_sha=commit)
if not is_completed:
print(f"CI is in progress for [{commit}] - check previous commit")
commits_to_branch_head += 1
continue
commit_ci_status = CI.GHActions.get_commit_status_by_name(
commit_ci_status = CI.GH.get_commit_status_by_name(
token=token,
commit_sha=commit,
status_name=(CI.JobNames.BUILD_CHECK, "ClickHouse build check"),

View File

@ -7,7 +7,7 @@ import re
from datetime import date, timedelta
from pathlib import Path
from subprocess import DEVNULL
from typing import Any, Dict, List, Optional, TextIO
from typing import Any, Dict, List, Optional, TextIO, Tuple
import tqdm # type: ignore
from github.GithubException import RateLimitExceededException, UnknownObjectException
@ -19,6 +19,8 @@ from env_helper import TEMP_PATH
from git_helper import git_runner, is_shallow
from github_helper import GitHub, PullRequest, PullRequests, Repository
from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from ci_utils import Shell
from version_helper import (
FILE_WITH_VERSION_PATH,
get_abs_path,
@ -171,6 +173,7 @@ def parse_args() -> argparse.Namespace:
parser.add_argument(
"--gh-user-or-token",
help="user name or GH token to authenticate",
default=get_best_robot_token(),
)
parser.add_argument(
"--gh-password",
@ -397,6 +400,21 @@ def get_year(prs: PullRequests) -> int:
return max(pr.created_at.year for pr in prs)
def get_branch_and_patch_by_tag(tag: str) -> Tuple[Optional[str], Optional[int]]:
tag = tag.removeprefix("v")
versions = tag.split(".")
if len(versions) < 4:
print("ERROR: Can't get branch by tag")
return None, None
try:
patch_version = int(versions[2])
branch = f"{int(versions[0])}.{int(versions[1])}"
print(f"Branch [{branch}], patch version [{patch_version}]")
except ValueError:
return None, None
return branch, patch_version
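For reference, here is how a few tags map through get_branch_and_patch_by_tag (tag values are examples):
assert get_branch_and_patch_by_tag("v24.3.6.5-lts") == ("24.3", 6)
assert get_branch_and_patch_by_tag("v24.8.1.2963-new") == ("24.8", 1)
assert get_branch_and_patch_by_tag("v24.8") == (None, None)  # too few components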
def main():
log_levels = [logging.WARN, logging.INFO, logging.DEBUG]
args = parse_args()
@ -446,6 +464,22 @@ def main():
gh_cache = GitHubCache(gh.cache_path, temp_path, S3Helper())
gh_cache.download()
query = f"type:pr repo:{args.repo} is:merged"
branch, patch = get_branch_and_patch_by_tag(TO_REF)
if branch and patch and Shell.check(f"git show-ref --quiet {branch}"):
if patch > 1:
query += f" base:{branch}"
print(
f"NOTE: It's a patch [{patch}]. will use base branch to filter PRs [{branch}]"
)
else:
print(
f"NOTE: It's a first patch version. should count PRs merged on master - won't filter PRs by branch"
)
else:
print(f"ERROR: invalid branch {branch} - pass")
print(f"Fetch PRs with query {query}")
prs = gh.get_pulls_from_search(
query=query, merged=merged, sort="created", progress_func=tqdm.tqdm
)

View File

@ -16,7 +16,7 @@ import upload_result_helper
from build_check import get_release_or_pr
from ci_config import CI
from ci_metadata import CiMetadata
from ci_utils import GHActions, normalize_string, Utils
from ci_utils import GH, normalize_string, Utils
from clickhouse_helper import (
CiLogsCredentials,
ClickHouseHelper,
@ -368,7 +368,7 @@ def _pre_action(s3, job_name, batch, indata, pr_info):
)
to_be_skipped = True
# skip_status = SUCCESS already there
GHActions.print_in_group("Commit Status Data", job_status)
GH.print_in_group("Commit Status Data", job_status)
# create pre report
jr = JobReport.create_pre_report(status=skip_status, job_skipped=to_be_skipped)
@ -1019,7 +1019,9 @@ def _get_ext_check_name(check_name: str) -> str:
return check_name_with_group
def _cancel_pr_wf(s3: S3Helper, pr_number: int, cancel_sync: bool = False) -> None:
def _cancel_pr_workflow(
s3: S3Helper, pr_number: int, cancel_sync: bool = False
) -> None:
wf_data = CiMetadata(s3, pr_number).fetch_meta()
if not cancel_sync:
if not wf_data.run_id:
@ -1368,12 +1370,12 @@ def main() -> int:
assert indata, "Run config must be provided via --infile"
_update_gh_statuses_action(indata=indata, s3=s3)
### CANCEL PREVIOUS WORKFLOW RUN
### CANCEL THE PREVIOUS WORKFLOW RUN
elif args.cancel_previous_run:
if pr_info.is_merge_queue:
_cancel_pr_wf(s3, pr_info.merged_pr)
_cancel_pr_workflow(s3, pr_info.merged_pr)
elif pr_info.is_pr:
_cancel_pr_wf(s3, pr_info.number, cancel_sync=True)
_cancel_pr_workflow(s3, pr_info.number, cancel_sync=True)
else:
assert False, "BUG! Not supported scenario"

View File

@ -8,7 +8,7 @@ import requests
from botocore.exceptions import ClientError
from pr_info import PRInfo
from ci_utils import Shell, GHActions
from ci_config import CI
class CIBuddy:
@ -31,10 +31,19 @@ class CIBuddy:
self.sha = pr_info.sha[:10]
def check_workflow(self):
GHActions.print_workflow_results()
res = GHActions.get_workflow_job_result(GHActions.ActionsNames.RunConfig)
if res != GHActions.ActionStatuses.SUCCESS:
self.post_job_error("Workflow Configuration Failed", critical=True)
CI.GH.print_workflow_results()
if CI.Envs.GITHUB_WORKFLOW == CI.WorkFlowNames.CreateRelease:
if not CI.GH.is_workflow_ok():
self.post_job_error(
f"{CI.Envs.GITHUB_WORKFLOW} Workflow Failed", critical=True
)
else:
res = CI.GH.get_workflow_job_result(CI.GH.ActionsNames.RunConfig)
if res != CI.GH.ActionStatuses.SUCCESS:
print(f"ERROR: RunConfig status is [{res}] - post report to slack")
self.post_job_error(
f"{CI.Envs.GITHUB_WORKFLOW} Workflow Failed", critical=True
)
@staticmethod
def _get_webhooks():
@ -74,10 +83,13 @@ class CIBuddy:
message = title
if isinstance(body, dict):
for name, value in body.items():
if "commit_sha" in name:
if "sha" in name and value and len(value) == 40:
value = (
f"<https://github.com/{self.repo}/commit/{value}|{value[:8]}>"
)
elif isinstance(value, str) and value.startswith("https://github.com/"):
value_shorten = value.split("/")[-1]
value = f"<{value}|{value_shorten}>"
message += f" *{name}*: {value}\n"
else:
message += body + "\n"
@ -120,8 +132,12 @@ class CIBuddy:
) -> None:
instance_id, instance_type = "unknown", "unknown"
if with_instance_info:
instance_id = Shell.run("ec2metadata --instance-id") or instance_id
instance_type = Shell.run("ec2metadata --instance-type") or instance_type
instance_id = (
CI.Shell.get_output("ec2metadata --instance-id") or instance_id
)
instance_type = (
CI.Shell.get_output("ec2metadata --instance-type") or instance_type
)
if not job_name:
job_name = os.getenv("CHECK_NAME", "unknown")
sign = ":red_circle:" if not critical else ":black_circle:"

View File

@ -7,7 +7,7 @@ from typing import Dict, Optional, Any, Union, Sequence, List, Set
from ci_config import CI
from ci_utils import is_hex, GHActions
from ci_utils import is_hex, GH
from commit_status_helper import CommitStatusData
from env_helper import (
TEMP_PATH,
@ -258,15 +258,15 @@ class CiCache:
def print_status(self):
print(f"Cache enabled: [{self.enabled}]")
for record_type in self.RecordType:
GHActions.print_in_group(
GH.print_in_group(
f"Cache records: [{record_type}]", list(self.records[record_type])
)
GHActions.print_in_group(
GH.print_in_group(
"Jobs to do:",
list(self.jobs_to_do.items()),
)
GHActions.print_in_group("Jobs to skip:", self.jobs_to_skip)
GHActions.print_in_group(
GH.print_in_group("Jobs to skip:", self.jobs_to_skip)
GH.print_in_group(
"Jobs to wait:",
list(self.jobs_to_wait.items()),
)
@ -788,7 +788,7 @@ class CiCache:
while round_cnt < MAX_ROUNDS_TO_WAIT:
round_cnt += 1
GHActions.print_in_group(
GH.print_in_group(
f"Wait pending jobs, round [{round_cnt}/{MAX_ROUNDS_TO_WAIT}]:",
list(self.jobs_to_wait),
)
@ -853,7 +853,7 @@ class CiCache:
# make up for 2 iterations in dry_run
expired_sec += int(TIMEOUT / 2) + 1
GHActions.print_in_group(
GH.print_in_group(
"Remaining jobs:",
[list(self.jobs_to_wait)],
)

View File

@ -34,7 +34,8 @@ class CI:
from ci_definitions import Runners as Runners
from ci_utils import Envs as Envs
from ci_utils import Utils as Utils
from ci_utils import GHActions as GHActions
from ci_utils import GH as GH
from ci_utils import Shell as Shell
from ci_definitions import Labels as Labels
from ci_definitions import TRUSTED_CONTRIBUTORS as TRUSTED_CONTRIBUTORS
from ci_definitions import WorkFlowNames as WorkFlowNames

View File

@ -112,6 +112,7 @@ class WorkFlowNames(metaclass=WithIter):
"""
JEPSEN = "JepsenWorkflow"
CreateRelease = "CreateRelease"
class BuildNames(metaclass=WithIter):
@ -554,7 +555,7 @@ class CommonJobConfigs:
run_command="sqllogic_test.py",
timeout=10800,
release_only=True,
runner_type=Runners.STYLE_CHECKER,
runner_type=Runners.FUNC_TESTER,
)
SQL_TEST = JobConfig(
job_name_keyword="sqltest",
@ -578,10 +579,11 @@ class CommonJobConfigs:
DOCKER_SERVER = JobConfig(
job_name_keyword="docker",
required_on_release_branch=True,
run_command='docker_server.py --check-name "$CHECK_NAME" --release-type head --allow-build-reuse',
run_command='docker_server.py --check-name "$CHECK_NAME" --tag-type head --allow-build-reuse',
digest=DigestConfig(
include_paths=[
"tests/ci/docker_server.py",
"tests/ci/docker_images_helper.py",
"./docker/server",
]
),

View File

@ -9,7 +9,7 @@ from env_helper import (
S3_BUILDS_BUCKET_PUBLIC,
)
from s3_helper import S3Helper
from ci_utils import GHActions
from ci_utils import GH
from synchronizer_utils import SYNC_BRANCH_PREFIX
@ -111,7 +111,7 @@ class CiMetadata:
else:
log_title = f"Storing workflow metadata: PR [{self.pr_number}], upstream PR [{self.upstream_pr_number}]"
GHActions.print_in_group(
GH.print_in_group(
log_title,
[f"run_id: {self.run_id}"],
)

View File

@ -2,6 +2,7 @@ import json
import os
import re
import subprocess
import sys
import time
from contextlib import contextmanager
from pathlib import Path
@ -15,6 +16,8 @@ class Envs:
WORKFLOW_RESULT_FILE = os.getenv(
"WORKFLOW_RESULT_FILE", "/tmp/workflow_results.json"
)
S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "clickhouse-builds")
GITHUB_WORKFLOW = os.getenv("GITHUB_WORKFLOW", "")
LABEL_CATEGORIES = {
@ -82,7 +85,7 @@ def normalize_string(string: str) -> str:
return res
class GHActions:
class GH:
class ActionsNames:
RunConfig = "RunConfig"
@ -116,6 +119,14 @@ class GHActions:
results = [f"{job}: {data['result']}" for job, data in res.items()]
cls.print_in_group("Workflow results", results)
@classmethod
def is_workflow_ok(cls) -> bool:
res = cls._get_workflow_results()
for _job, data in res.items():
if data["result"] == "failure":
return False
return bool(res)
@classmethod
def get_workflow_job_result(cls, wf_job_name: str) -> Optional[str]:
res = cls._get_workflow_results()
@ -188,73 +199,79 @@ class GHActions:
return False
@staticmethod
def get_pr_url_by_branch(repo, branch):
get_url_cmd = (
f"gh pr list --repo {repo} --head {branch} --json url --jq '.[0].url'"
)
url = Shell.run(get_url_cmd)
def get_pr_url_by_branch(branch, repo=None):
repo = repo or Envs.GITHUB_REPOSITORY
get_url_cmd = f"gh pr list --repo {repo} --head {branch} --json url --jq '.[0].url' --state open"
url = Shell.get_output(get_url_cmd)
if not url:
print(f"WARNING: No open PR found, branch [{branch}] - search for merged")
get_url_cmd = f"gh pr list --repo {repo} --head {branch} --json url --jq '.[0].url' --state merged"
url = Shell.get_output(get_url_cmd)
if not url:
print(f"ERROR: PR nor found, branch [{branch}]")
return url
@staticmethod
def is_latest_release_branch(branch):
latest_branch = Shell.get_output(
'gh pr list --label release --repo ClickHouse/ClickHouse --search "sort:created" -L1 --json headRefName'
)
return latest_branch == branch
class Shell:
@classmethod
def run_strict(cls, command):
def get_output_or_raise(cls, command):
return cls.get_output(command, strict=True)
@classmethod
def get_output(cls, command, strict=False):
res = subprocess.run(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
check=True,
check=strict,
)
return res.stdout.strip()
@classmethod
def run(cls, command, check=False, dry_run=False, **kwargs):
def check(
cls,
command,
strict=False,
verbose=False,
dry_run=False,
stdin_str=None,
**kwargs,
):
if dry_run:
print(f"Dry-ryn. Would run command [{command}]")
return ""
print(f"Run command [{command}]")
res = ""
result = subprocess.run(
return True
if verbose:
print(f"Run command [{command}]")
proc = subprocess.Popen(
command,
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
check=False,
stdin=subprocess.PIPE if stdin_str else None,
universal_newlines=True,
start_new_session=True,
bufsize=1,
errors="backslashreplace",
**kwargs,
)
if result.returncode == 0:
print(f"stdout: {result.stdout.strip()}")
res = result.stdout
else:
print(
f"ERROR: stdout: {result.stdout.strip()}, stderr: {result.stderr.strip()}"
)
if check:
assert result.returncode == 0
return res.strip()
@classmethod
def run_as_daemon(cls, command):
print(f"Run daemon command [{command}]")
subprocess.Popen(command.split(" ")) # pylint:disable=consider-using-with
return 0, ""
@classmethod
def check(cls, command):
result = subprocess.run(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
check=False,
)
return result.returncode == 0
if stdin_str:
proc.communicate(input=stdin_str)
elif proc.stdout:
for line in proc.stdout:
sys.stdout.write(line)
proc.wait()
if strict:
assert proc.returncode == 0
return proc.returncode == 0
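To summarize the reworked Shell helper contract, a short usage sketch (commands are examples):
ok = Shell.check("ls /tmp", verbose=True)              # bool, never raises
Shell.check("test -d /tmp", strict=True)               # asserts on non-zero exit
out = Shell.get_output("git rev-parse HEAD")           # stdout, failures tolerated
sha = Shell.get_output_or_raise("git rev-parse HEAD")  # raises on non-zero exit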
class Utils:
@ -278,7 +295,7 @@ class Utils:
@staticmethod
def clear_dmesg():
Shell.run("sudo dmesg --clear ||:")
Shell.check("sudo dmesg --clear", verbose=True)
@staticmethod
def check_pr_description(pr_body: str, repo_name: str) -> Tuple[str, str]:

View File

@ -3,6 +3,7 @@ import fileinput
import json
import logging
import time
import os
from pathlib import Path
from typing import Any, Dict, List, Optional
@ -298,6 +299,11 @@ class CiLogsCredentials:
def get_docker_arguments(
self, pr_info: PRInfo, check_start_time: str, check_name: str
) -> str:
run_by_hash_total = int(os.getenv("RUN_BY_HASH_TOTAL", "0"))
if run_by_hash_total > 1:
run_by_hash_num = int(os.getenv("RUN_BY_HASH_NUM", "0"))
check_name = f"{check_name} [{run_by_hash_num + 1}/{run_by_hash_total}]"
self.create_ci_logs_credentials()
if not self.config_path.exists():
logging.info("Do not use external logs pushing")

View File

@ -301,7 +301,7 @@ def get_worst_state(statuses: CommitStatuses) -> StatusType:
def create_ci_report(pr_info: PRInfo, statuses: CommitStatuses) -> str:
"""The function converst the statuses to TestResults and uploads the report
"""The function converts the statuses to TestResults and uploads the report
to S3 tests bucket. Then it returns the URL"""
test_results = [] # type: TestResults
for status in statuses:

View File

@ -10,9 +10,8 @@ from typing import Iterator, List
from git_helper import Git, GIT_PREFIX
from ssh import SSHAgent
from env_helper import GITHUB_REPOSITORY, S3_BUILDS_BUCKET
from s3_helper import S3Helper
from ci_utils import Shell, GHActions
from ci_utils import Shell, GH
from ci_buddy import CIBuddy
from version_helper import (
FILE_WITH_VERSION_PATH,
@ -43,6 +42,7 @@ class ReleaseProgress:
TEST_TGZ = "test TGZ packages"
TEST_RPM = "test RPM packages"
TEST_DEB = "test DEB packages"
MERGE_CREATED_PRS = "merge created PRs"
COMPLETED = "completed"
@ -68,13 +68,14 @@ class ReleaseContextManager:
previous_release_tag="NA",
previous_release_sha="NA",
release_progress=ReleaseProgress.STARTED,
latest=False,
).dump()
else:
# fetch release info from fs and update
self.release_info = ReleaseInfo.from_file()
assert self.release_info
assert (
self.release_info.progress_description == ReleaseProgressDescription.OK
self.release_info.progress_status == ReleaseProgressDescription.OK
), "Must be OK on the start of new context"
self.release_info.release_progress = self.release_progress
self.release_info.dump()
@ -83,9 +84,9 @@ class ReleaseContextManager:
def __exit__(self, exc_type, exc_value, traceback):
assert self.release_info
if exc_type is not None:
self.release_info.progress_description = ReleaseProgressDescription.FAILED
self.release_info.progress_status = ReleaseProgressDescription.FAILED
else:
self.release_info.progress_description = ReleaseProgressDescription.OK
self.release_info.progress_status = ReleaseProgressDescription.OK
self.release_info.dump()
@ -95,19 +96,21 @@ class ReleaseInfo:
release_tag: str
release_branch: str
commit_sha: str
latest: bool
# lts or stable
codename: str
previous_release_tag: str
previous_release_sha: str
changelog_pr: str = ""
version_bump_pr: str = ""
prs_merged: bool = False
release_url: str = ""
debian_command: str = ""
rpm_command: str = ""
tgz_command: str = ""
docker_command: str = ""
debian: str = ""
rpm: str = ""
tgz: str = ""
docker: str = ""
release_progress: str = ""
progress_description: str = ""
progress_status: str = ""
def is_patch(self):
return self.release_branch != "master"
@ -127,22 +130,26 @@ class ReleaseInfo:
print(json.dumps(dataclasses.asdict(self), indent=2), file=f)
return self
def prepare(self, commit_ref: str, release_type: str) -> "ReleaseInfo":
def prepare(
self, commit_ref: str, release_type: str, skip_tag_check: bool
) -> "ReleaseInfo":
version = None
release_branch = None
release_tag = None
previous_release_tag = None
previous_release_sha = None
latest_release = False
codename = ""
assert release_type in ("patch", "new")
if release_type == "new":
# check commit_ref is right and on a right branch
Shell.run(
Shell.check(
f"git merge-base --is-ancestor {commit_ref} origin/master",
check=True,
strict=True,
verbose=True,
)
with checkout(commit_ref):
commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True)
commit_sha = Shell.get_output_or_raise(f"git rev-list -n1 {commit_ref}")
# Git() must be inside "with checkout" contextmanager
git = Git()
version = get_version_from_repo(git=git)
@ -154,13 +161,13 @@ class ReleaseInfo:
), f"BUG: latest tag [{git.latest_tag}], expected [{expected_prev_tag}]"
release_tag = version.describe
previous_release_tag = expected_prev_tag
previous_release_sha = Shell.run_strict(
f"git rev-parse {previous_release_tag}"
previous_release_sha = Shell.get_output_or_raise(
f"git rev-list -n1 {previous_release_tag}"
)
assert previous_release_sha
if release_type == "patch":
with checkout(commit_ref):
commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True)
commit_sha = Shell.get_output_or_raise(f"git rev-list -n1 {commit_ref}")
# Git() must be inside "with checkout" contextmanager
git = Git()
version = get_version_from_repo(git=git)
@ -168,11 +175,16 @@ class ReleaseInfo:
version.with_description(codename)
release_branch = f"{version.major}.{version.minor}"
release_tag = version.describe
Shell.run(f"{GIT_PREFIX} fetch origin {release_branch} --tags", check=True)
Shell.check(
f"{GIT_PREFIX} fetch origin {release_branch} --tags",
strict=True,
verbose=True,
)
# check commit is right and on a right branch
Shell.run(
Shell.check(
f"git merge-base --is-ancestor {commit_ref} origin/{release_branch}",
check=True,
strict=True,
verbose=True,
)
if version.patch == 1:
expected_version = copy(version)
@ -192,16 +204,20 @@ class ReleaseInfo:
expected_tag_prefix
) and git.latest_tag.endswith(expected_tag_suffix):
pass
else:
elif not skip_tag_check:
assert (
False
), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]"
), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]. Already Released?"
previous_release_sha = Shell.run_strict(
f"git rev-parse {previous_release_tag}"
previous_release_sha = Shell.get_output_or_raise(
f"git rev-list -n1 {previous_release_tag}"
)
assert previous_release_sha
if CI.GH.is_latest_release_branch(release_branch):
print("This is going to be the latest release!")
latest_release = True
assert (
release_branch
and previous_release_tag
@ -210,7 +226,7 @@ class ReleaseInfo:
and release_tag
and version
and (codename in ("lts", "stable") or release_type == "new")
)
), f"Check: {release_branch}, {previous_release_tag}, {previous_release_sha}, {commit_sha}, {release_tag}, {version}"
self.release_branch = release_branch
self.commit_sha = commit_sha
@ -220,31 +236,33 @@ class ReleaseInfo:
self.previous_release_tag = previous_release_tag
self.previous_release_sha = previous_release_sha
self.release_progress = ReleaseProgress.STARTED
self.progress_description = ReleaseProgressDescription.OK
self.progress_status = ReleaseProgressDescription.OK
self.latest = latest_release
return self
def push_release_tag(self, dry_run: bool) -> None:
if dry_run:
# remove locally created tag from prev run
Shell.run(
f"{GIT_PREFIX} tag -l | grep -q {self.release_tag} && git tag -d {self.release_tag} ||:"
Shell.check(
f"{GIT_PREFIX} tag -l | grep -q {self.release_tag} && git tag -d {self.release_tag}"
)
# Create release tag
print(
f"Create and push release tag [{self.release_tag}], commit [{self.commit_sha}]"
)
tag_message = f"Release {self.release_tag}"
Shell.run(
Shell.check(
f"{GIT_PREFIX} tag -a -m '{tag_message}' {self.release_tag} {self.commit_sha}",
check=True,
strict=True,
verbose=True,
)
cmd_push_tag = f"{GIT_PREFIX} push origin {self.release_tag}:{self.release_tag}"
Shell.run(cmd_push_tag, dry_run=dry_run, check=True)
Shell.check(cmd_push_tag, dry_run=dry_run, strict=True, verbose=True)
@staticmethod
def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None:
cmd = f"gh api repos/{GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}"
Shell.run(cmd, dry_run=dry_run, check=True)
cmd = f"gh api repos/{CI.Envs.GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}"
Shell.check(cmd, dry_run=dry_run, strict=True)
def push_new_release_branch(self, dry_run: bool) -> None:
assert (
@ -261,7 +279,7 @@ class ReleaseInfo:
), f"Unexpected current version in git, must precede [{self.version}] by one step, actual [{version.string}]"
if dry_run:
# remove locally created branch from prev run
Shell.run(
Shell.check(
f"{GIT_PREFIX} branch -l | grep -q {new_release_branch} && git branch -d {new_release_branch}"
)
print(
@ -275,7 +293,7 @@ class ReleaseInfo:
cmd_push_branch = (
f"{GIT_PREFIX} push --set-upstream origin {new_release_branch}"
)
Shell.run(cmd_push_branch, dry_run=dry_run, check=True)
Shell.check(cmd_push_branch, dry_run=dry_run, strict=True, verbose=True)
print("Create and push backport tags for new release branch")
ReleaseInfo._create_gh_label(
@ -284,18 +302,22 @@ class ReleaseInfo:
ReleaseInfo._create_gh_label(
f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run
)
Shell.run(
f"""gh pr create --repo {GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}'
Shell.check(
f"""gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}'
--head {new_release_branch} {pr_labels}
--body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.'
""",
dry_run=dry_run,
check=True,
strict=True,
verbose=True,
)
def get_version_bump_branch(self):
return f"bump_version_{self.version}"
def update_version_and_contributors_list(self, dry_run: bool) -> None:
# Bump version, update contributors list, create PR
branch_upd_version_contributors = f"bump_version_{self.version}"
branch_upd_version_contributors = self.get_version_bump_branch()
with checkout(self.commit_sha):
git = Git()
version = get_version_from_repo(git=git)
@ -313,43 +335,61 @@ class ReleaseInfo:
update_contributors(raise_error=True)
cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'"
cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}"
body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md")
actor = os.getenv("GITHUB_ACTOR", "") or "me"
cmd_create_pr = f"gh pr create --repo {GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body-file '{body_file} --label 'do not test' --assignee @{actor}"
Shell.run(cmd_commit_version_upd, check=True, dry_run=dry_run)
Shell.run(cmd_push_branch, check=True, dry_run=dry_run)
Shell.run(cmd_create_pr, check=True, dry_run=dry_run)
body = f"Automatic version bump after release {self.release_tag}\n### Changelog category (leave one):\n- Not for changelog (changelog entry is not required)\n"
cmd_create_pr = f"gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body \"{body}\" --assignee {actor}"
Shell.check(
cmd_commit_version_upd, strict=True, dry_run=dry_run, verbose=True
)
Shell.check(cmd_push_branch, strict=True, dry_run=dry_run, verbose=True)
Shell.check(cmd_create_pr, strict=True, dry_run=dry_run, verbose=True)
if dry_run:
Shell.run(f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'")
Shell.run(
f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'"
Shell.check(
f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'",
verbose=True,
)
Shell.check(
f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'",
verbose=True,
)
self.version_bump_pr = "dry-run"
else:
self.version_bump_pr = GHActions.get_pr_url_by_branch(
repo=GITHUB_REPOSITORY, branch=branch_upd_version_contributors
self.version_bump_pr = GH.get_pr_url_by_branch(
branch=branch_upd_version_contributors
)
def get_change_log_branch(self):
return f"auto/{self.release_tag}"
def update_release_info(self, dry_run: bool) -> "ReleaseInfo":
if self.release_branch != "master":
branch = f"auto/{release_info.release_tag}"
if not dry_run:
url = GHActions.get_pr_url_by_branch(
repo=GITHUB_REPOSITORY, branch=branch
)
else:
url = "dry-run"
print(f"ChangeLog PR url [{url}]")
self.changelog_pr = url
print(f"Release url [{url}]")
self.release_url = f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}"
if self.release_progress == ReleaseProgress.COMPLETED:
self.docker_command = f"docker run --rm clickhouse/clickhouse:{self.version} clickhouse --version"
if not self.changelog_pr:
branch = self.get_change_log_branch()
if not dry_run:
url = GH.get_pr_url_by_branch(branch=branch)
else:
url = "dry-run"
print(f"ChangeLog PR url [{url}]")
self.changelog_pr = url
if not self.version_bump_pr:
branch = self.get_version_bump_branch()
if not dry_run:
url = GH.get_pr_url_by_branch(branch=branch)
else:
url = "dry-run"
print(f"Version bump PR url [{url}]")
self.version_bump_pr = url
self.release_url = f"https://github.com/{CI.Envs.GITHUB_REPOSITORY}/releases/tag/{self.release_tag}"
print(f"Release url [{self.release_url}]")
self.docker = f"docker run --rm clickhouse/clickhouse:{self.version} clickhouse --version"
self.dump()
return self
def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None:
repo = os.getenv("GITHUB_REPOSITORY")
repo = CI.Envs.GITHUB_REPOSITORY
assert repo
cmds = [
f"gh release create --repo {repo} --title 'Release {self.release_tag}' {self.release_tag}"
@ -358,14 +398,48 @@ class ReleaseInfo:
cmds.append(f"gh release upload {self.release_tag} {file}")
if not dry_run:
for cmd in cmds:
Shell.run(cmd, check=True)
self.release_url = f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}"
Shell.check(cmd, strict=True, verbose=True)
self.release_url = (
f"https://github.com/{repo}/releases/tag/{self.release_tag}"
)
else:
print("Dry-run, would run commands:")
print("\n * ".join(cmds))
self.release_url = f"dry-run"
self.dump()
def merge_prs(self, dry_run: bool) -> None:
repo = CI.Envs.GITHUB_REPOSITORY
assert self.version_bump_pr
if dry_run:
version_bump_pr_num = 12345
else:
version_bump_pr_num = int(self.version_bump_pr.split("/")[-1])
print("Merging Version bump PR")
res_1 = Shell.check(
f"gh pr merge {version_bump_pr_num} --repo {repo} --merge --auto",
verbose=True,
dry_run=dry_run,
)
res_2 = True
if not self.release_tag.endswith("-new"):
assert self.changelog_pr
print("Merging ChangeLog PR")
if dry_run:
changelog_pr_num = 23456
else:
changelog_pr_num = int(self.changelog_pr.split("/")[-1])
res_2 = Shell.check(
f"gh pr merge {changelog_pr_num} --repo {repo} --merge --auto",
verbose=True,
dry_run=dry_run,
)
else:
assert not self.changelog_pr
self.prs_merged = res_1 and res_2
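merge_prs() leans on the fact that a GitHub PR URL ends in the PR number; a self-contained sketch of that extraction (the URL is illustrative):
def pr_number_from_url(pr_url: str) -> int:
    # https://github.com/<owner>/<repo>/pull/12345 -> 12345
    return int(pr_url.rstrip("/").split("/")[-1])
assert pr_number_from_url("https://github.com/ClickHouse/ClickHouse/pull/12345") == 12345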
class RepoTypes:
RPM = "rpm"
@ -424,7 +498,7 @@ class PackageDownloader:
self.macos_package_files = ["clickhouse-macos", "clickhouse-macos-aarch64"]
self.file_to_type = {}
Shell.run(f"mkdir -p {self.LOCAL_DIR}")
Shell.check(f"mkdir -p {self.LOCAL_DIR}")
for package_type in self.PACKAGE_TYPES:
for package in self.package_names:
@ -474,7 +548,7 @@ class PackageDownloader:
return res
def run(self):
Shell.run(f"rm -rf {self.LOCAL_DIR}/*")
Shell.check(f"rm -rf {self.LOCAL_DIR}/*")
for package_file in (
self.deb_package_files + self.rpm_package_files + self.tgz_package_files
):
@ -488,7 +562,7 @@ class PackageDownloader:
]
)
self.s3.download_file(
bucket=S3_BUILDS_BUCKET,
bucket=CI.Envs.S3_BUILDS_BUCKET,
s3_path=s3_path,
local_file_path="/".join([self.LOCAL_DIR, package_file]),
)
@ -509,7 +583,7 @@ class PackageDownloader:
]
)
self.s3.download_file(
bucket=S3_BUILDS_BUCKET,
bucket=CI.Envs.S3_BUILDS_BUCKET,
s3_path=s3_path,
local_file_path="/".join([self.LOCAL_DIR, destination_binary_name]),
)
@ -549,33 +623,33 @@ class PackageDownloader:
@contextmanager
def checkout(ref: str) -> Iterator[None]:
orig_ref = Shell.run(f"{GIT_PREFIX} symbolic-ref --short HEAD", check=True)
orig_ref = Shell.get_output_or_raise(f"{GIT_PREFIX} symbolic-ref --short HEAD")
rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
assert orig_ref
if ref not in (orig_ref,):
Shell.run(f"{GIT_PREFIX} checkout {ref}")
Shell.check(f"{GIT_PREFIX} checkout {ref}", strict=True, verbose=True)
try:
yield
except (Exception, KeyboardInterrupt) as e:
print(f"ERROR: Exception [{e}]")
Shell.run(rollback_cmd)
Shell.check(rollback_cmd, verbose=True)
raise
Shell.run(rollback_cmd)
Shell.check(rollback_cmd, verbose=True)
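A hedged usage sketch: checkout() temporarily moves HEAD to an existing ref and always restores the original branch, even when the body raises; the checkout_new() variant below does the same after creating the branch first.
with checkout("origin/24.8"):     # hypothetical ref
    pass  # release steps run against the ref; HEAD is restored on exit or on error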
@contextmanager
def checkout_new(ref: str) -> Iterator[None]:
orig_ref = Shell.run(f"{GIT_PREFIX} symbolic-ref --short HEAD", check=True)
orig_ref = Shell.get_output_or_raise(f"{GIT_PREFIX} symbolic-ref --short HEAD")
rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
assert orig_ref
Shell.run(f"{GIT_PREFIX} checkout -b {ref}", check=True)
Shell.check(f"{GIT_PREFIX} checkout -b {ref}", strict=True, verbose=True)
try:
yield
except (Exception, KeyboardInterrupt) as e:
print(f"ERROR: Exception [{e}]")
Shell.run(rollback_cmd)
Shell.check(rollback_cmd, verbose=True)
raise
Shell.run(rollback_cmd)
Shell.check(rollback_cmd, verbose=True)
def parse_args() -> argparse.Namespace:
@ -588,6 +662,11 @@ def parse_args() -> argparse.Namespace:
action="store_true",
help="Initial step to prepare info like release branch, release tag, etc.",
)
parser.add_argument(
"--skip-tag-check",
action="store_true",
help="To skip check against latest git tag on a release branch",
)
parser.add_argument(
"--push-release-tag",
action="store_true",
@ -613,6 +692,11 @@ def parse_args() -> argparse.Namespace:
action="store_true",
help="Create GH Release object and attach all packages",
)
parser.add_argument(
"--merge-prs",
action="store_true",
help="Merge PRs with version, changelog updates",
)
parser.add_argument(
"--post-status",
action="store_true",
@ -672,7 +756,11 @@ if __name__ == "__main__":
assert (
args.ref and args.release_type
), "--ref and --release-type must be provided with --prepare-release-info"
release_info.prepare(commit_ref=args.ref, release_type=args.release_type)
release_info.prepare(
commit_ref=args.ref,
release_type=args.release_type,
skip_tag_check=args.skip_tag_check,
)
if args.download_packages:
with ReleaseContextManager(
@ -718,13 +806,12 @@ if __name__ == "__main__":
if args.post_status:
release_info = ReleaseInfo.from_file()
release_info.update_release_info(dry_run=args.dry_run)
if release_info.is_new_release_branch():
title = "New release branch"
else:
title = "New release"
if (
release_info.progress_description == ReleaseProgressDescription.OK
release_info.progress_status == ReleaseProgressDescription.OK
and release_info.release_progress == ReleaseProgress.COMPLETED
):
title = "Completed: " + title
@ -740,18 +827,25 @@ if __name__ == "__main__":
if args.set_progress_started:
ri = ReleaseInfo.from_file()
ri.release_progress = args.progress
ri.progress_description = ReleaseProgressDescription.FAILED
ri.progress_status = ReleaseProgressDescription.FAILED
ri.dump()
assert args.progress, "Progress step name must be provided"
if args.set_progress_completed:
ri = ReleaseInfo.from_file()
assert (
ri.progress_description == ReleaseProgressDescription.FAILED
ri.progress_status == ReleaseProgressDescription.FAILED
), "Must be FAILED before set to OK"
ri.progress_description = ReleaseProgressDescription.OK
ri.progress_status = ReleaseProgressDescription.OK
ri.dump()
if args.merge_prs:
with ReleaseContextManager(
release_progress=ReleaseProgress.MERGE_CREATED_PRS
) as release_info:
release_info.update_release_info(dry_run=args.dry_run)
release_info.merge_prs(dry_run=args.dry_run)
# tear down ssh
if _ssh_agent and _key_pub:
_ssh_agent.remove(_key_pub)

View File

@ -19,11 +19,11 @@ def docker_login(relogin: bool = True) -> None:
if relogin or not Shell.check(
"docker system info | grep --quiet -E 'Username|Registry'"
):
Shell.run( # pylint: disable=unexpected-keyword-arg
Shell.check( # pylint: disable=unexpected-keyword-arg
"docker login --username 'robotclickhouse' --password-stdin",
input=get_parameter_from_ssm("dockerhub_robot_password"),
strict=True,
stdin_str=get_parameter_from_ssm("dockerhub_robot_password"),
encoding="utf-8",
check=True,
)
@ -42,7 +42,7 @@ class DockerImage:
def pull_image(image: DockerImage) -> DockerImage:
try:
logging.info("Pulling image %s - start", image)
Shell.run(f"docker pull {image}", check=True)
Shell.check(f"docker pull {image}", strict=True)
logging.info("Pulling image %s - done", image)
except Exception as ex:
logging.info("Got exception pulling docker %s", ex)

View File

@ -27,7 +27,6 @@ from stopwatch import Stopwatch
from tee_popen import TeePopen
from version_helper import (
ClickHouseVersion,
get_tagged_versions,
get_version_from_repo,
version_arg,
)
@ -69,13 +68,14 @@ def parse_args() -> argparse.Namespace:
help="sha of the commit to use packages from",
)
parser.add_argument(
"--release-type",
"--tag-type",
type=str,
choices=("auto", "latest", "major", "minor", "patch", "head"),
choices=("head", "release", "latest-release"),
default="head",
help="version part that will be updated when '--version' is set; "
"'auto' is a special case, it will get versions from github and detect the "
"release type (latest, major, minor or patch) automatically",
help="defines required tags for resulting docker image. "
"head - for master image (tag: head) "
"release - for release image (tags: XX, XX.XX, XX.XX.XX, XX.XX.XX.XX) "
"release-latest - for latest release image (tags: XX, XX.XX, XX.XX.XX, XX.XX.XX.XX, latest) ",
)
parser.add_argument(
"--image-path",
@ -149,74 +149,35 @@ def retry_popen(cmd: str, log_file: Path) -> int:
return retcode
def auto_release_type(version: ClickHouseVersion, release_type: str) -> str:
if release_type != "auto":
return release_type
git_versions = get_tagged_versions()
reference_version = git_versions[0]
for i in reversed(range(len(git_versions))):
if git_versions[i] <= version:
if i == len(git_versions) - 1:
return "latest"
reference_version = git_versions[i + 1]
break
if version.major < reference_version.major:
return "major"
if version.minor < reference_version.minor:
return "minor"
if version.patch < reference_version.patch:
return "patch"
raise ValueError(
"Release type 'tweak' is not supported for "
f"{version.string} < {reference_version.string}"
)
def gen_tags(version: ClickHouseVersion, release_type: str) -> List[str]:
def gen_tags(version: ClickHouseVersion, tag_type: str) -> List[str]:
"""
22.2.2.2 + latest:
@tag_type release-latest, @version 22.2.2.2:
- latest
- 22
- 22.2
- 22.2.2
- 22.2.2.2
22.2.2.2 + major:
@tag_type release, @version 22.2.2.2:
- 22
- 22.2
- 22.2.2
- 22.2.2.2
22.2.2.2 + minor:
- 22.2
- 22.2.2
- 22.2.2.2
22.2.2.2 + patch:
- 22.2.2
- 22.2.2.2
22.2.2.2 + head:
@tag_type head:
- head
"""
parts = version.string.split(".")
tags = []
if release_type == "latest":
tags.append(release_type)
if tag_type == "release-latest":
tags.append("latest")
for i in range(len(parts)):
tags.append(".".join(parts[: i + 1]))
elif release_type == "major":
elif tag_type == "head":
tags.append(tag_type)
elif tag_type == "release":
for i in range(len(parts)):
tags.append(".".join(parts[: i + 1]))
elif release_type == "minor":
for i in range(1, len(parts)):
tags.append(".".join(parts[: i + 1]))
elif release_type == "patch":
for i in range(2, len(parts)):
tags.append(".".join(parts[: i + 1]))
elif release_type == "head":
tags.append(release_type)
else:
raise ValueError(f"{release_type} is not valid release part")
assert False, f"Invalid release type [{tag_type}]"
return tags
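A standalone restatement of the expansion above, operating on a plain version string instead of ClickHouseVersion (a sketch for illustration, not the CI helper itself):
def gen_tags_sketch(version_string: str, tag_type: str) -> list:
    parts = version_string.split(".")
    cumulative = [".".join(parts[: i + 1]) for i in range(len(parts))]
    if tag_type == "head":
        return ["head"]
    if tag_type == "release":
        return cumulative                 # 22, 22.2, 22.2.2, 22.2.2.2
    if tag_type == "release-latest":
        return ["latest"] + cumulative    # latest, 22, 22.2, ...
    raise ValueError(f"Invalid tag type [{tag_type}]")
assert gen_tags_sketch("22.2.2.2", "release") == ["22", "22.2", "22.2.2", "22.2.2.2"]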
@ -370,8 +331,7 @@ def main():
push = True
image = DockerImageData(image_path, image_repo, False)
args.release_type = auto_release_type(args.version, args.release_type)
tags = gen_tags(args.version, args.release_type)
tags = gen_tags(args.version, args.tag_type)
repo_urls = {}
direct_urls: Dict[str, List[str]] = {}

View File

@ -689,4 +689,5 @@ def main():
if __name__ == "__main__":
assert False, "Script Deprecated, ask ci team for help"
main()

View File

@ -293,9 +293,9 @@ class JobReport:
start_time: str
duration: float
additional_files: Union[Sequence[str], Sequence[Path]]
# clickhouse version, build job only
# ClickHouse version, build job only
version: str = ""
# checkname to set in commit status, set if differs from jjob name
# check_name to be set in commit status, set it if it differs from the job name
check_name: str = ""
# directory with artifacts to upload on s3
build_dir_for_upload: Union[Path, str] = ""
@ -667,11 +667,7 @@ ColorTheme = Tuple[str, str, str]
def _format_header(
header: str, branch_name: str, branch_url: Optional[str] = None
) -> str:
# Following line does not lower CI->Ci and SQLancer->Sqlancer. It only
# capitalizes the first letter and doesn't touch the rest of the word
result = " ".join([w[0].upper() + w[1:] for w in header.split(" ") if w])
result = result.replace("Clickhouse", "ClickHouse")
result = result.replace("clickhouse", "ClickHouse")
result = header
if "ClickHouse" not in result:
result = f"ClickHouse {result}"
if branch_url:

View File

@ -1,61 +1,19 @@
#!/usr/bin/env python
import unittest
from unittest.mock import patch, MagicMock
from version_helper import get_version_from_string
import docker_server as ds
# ds.logging.basicConfig(level=ds.logging.INFO)
class TestDockerServer(unittest.TestCase):
def test_gen_tags(self):
version = get_version_from_string("22.2.2.2")
cases = (
("latest", ["latest", "22", "22.2", "22.2.2", "22.2.2.2"]),
("major", ["22", "22.2", "22.2.2", "22.2.2.2"]),
("minor", ["22.2", "22.2.2", "22.2.2.2"]),
("patch", ["22.2.2", "22.2.2.2"]),
("release-latest", ["latest", "22", "22.2", "22.2.2", "22.2.2.2"]),
("release", ["22", "22.2", "22.2.2", "22.2.2.2"]),
("head", ["head"]),
)
for case in cases:
release_type = case[0]
self.assertEqual(case[1], ds.gen_tags(version, release_type))
with self.assertRaises(ValueError):
ds.gen_tags(version, "auto")
@patch("docker_server.get_tagged_versions")
def test_auto_release_type(self, mock_tagged_versions: MagicMock) -> None:
mock_tagged_versions.return_value = [
get_version_from_string("1.1.1.1"),
get_version_from_string("1.2.1.1"),
get_version_from_string("2.1.1.1"),
get_version_from_string("2.2.1.1"),
get_version_from_string("2.2.2.1"),
]
cases_less = (
(get_version_from_string("1.0.1.1"), "minor"),
(get_version_from_string("1.1.2.1"), "minor"),
(get_version_from_string("1.3.1.1"), "major"),
(get_version_from_string("2.1.2.1"), "minor"),
(get_version_from_string("2.2.1.3"), "patch"),
(get_version_from_string("2.2.3.1"), "latest"),
(get_version_from_string("2.3.1.1"), "latest"),
)
for case in cases_less:
release = ds.auto_release_type(case[0], "auto")
self.assertEqual(case[1], release)
cases_equal = (
(get_version_from_string("1.1.1.1"), "minor"),
(get_version_from_string("1.2.1.1"), "major"),
(get_version_from_string("2.1.1.1"), "minor"),
(get_version_from_string("2.2.1.1"), "patch"),
(get_version_from_string("2.2.2.1"), "latest"),
)
for case in cases_equal:
release = ds.auto_release_type(case[0], "auto")
self.assertEqual(case[1], release)

View File

@ -1,16 +1,19 @@
#!/usr/bin/env python3
# pylint: disable=unused-argument
# pylint: disable=broad-exception-raised
import logging
import os
import pytest # pylint:disable=import-error; for style check
from helpers.cluster import run_and_check
from helpers.cluster import run_and_check, is_port_free
from helpers.network import _NetworkManager
# This is a workaround for a problem with logging in pytest [1].
#
# [1]: https://github.com/pytest-dev/pytest/issues/5502
logging.raiseExceptions = False
PORTS_PER_WORKER = 50
@pytest.fixture(scope="session", autouse=True)
@ -111,5 +114,40 @@ def pytest_addoption(parser):
)
def get_unique_free_ports(total):
ports = []
for port in range(30000, 55000):
if is_port_free(port) and port not in ports:
ports.append(port)
if len(ports) == total:
return ports
raise Exception(f"Can't collect {total} ports. Collected: {len(ports)}")
def pytest_configure(config):
os.environ["INTEGRATION_TESTS_RUN_ID"] = config.option.run_id
# When running tests without pytest-xdist,
# the `pytest_xdist_setupnodes` hook is not executed
worker_ports = os.getenv("WORKER_FREE_PORTS", None)
if worker_ports is None:
master_ports = get_unique_free_ports(PORTS_PER_WORKER)
os.environ["WORKER_FREE_PORTS"] = " ".join([str(p) for p in master_ports])
def pytest_xdist_setupnodes(config, specs):
# Find {PORTS_PER_WORKER} * {number of xdist workers} ports and
# allocate pool of {PORTS_PER_WORKER} ports to each worker
# Get number of xdist workers
num_workers = len(specs)
# Get free ports which will be distributed across workers
ports = get_unique_free_ports(num_workers * PORTS_PER_WORKER)
# Iterate over specs of workers and add allocated ports to env variable
for i, spec in enumerate(specs):
start_range = i * PORTS_PER_WORKER
per_worker_ports = ports[start_range : start_range + PORTS_PER_WORKER]
spec.env["WORKER_FREE_PORTS"] = " ".join([str(p) for p in per_worker_ports])

View File

@ -135,6 +135,52 @@ def get_free_port():
return s.getsockname()[1]
def is_port_free(port: int) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("", port))
return True
except socket.error:
return False
class PortPoolManager:
"""
Distributes the ports allocated to a single pytest-xdist worker.
Can be shared by multiple ClickHouseCluster instances.
"""
# Shared between instances
all_ports = None
free_ports = None
def __init__(self):
self.used_ports = []
if self.all_ports is None:
worker_ports = os.getenv("WORKER_FREE_PORTS")
ports = [int(p) for p in worker_ports.split(" ")]
# Static vars
PortPoolManager.all_ports = ports
PortPoolManager.free_ports = ports
def get_port(self):
for port in self.free_ports:
if is_port_free(port):
self.free_ports.remove(port)
self.used_ports.append(port)
return port
raise Exception(
f"No free ports: {self.all_ports}",
)
def return_used_ports(self):
self.free_ports.extend(self.used_ports)
self.used_ports.clear()
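A minimal usage sketch of the pool, assuming WORKER_FREE_PORTS was populated by the pytest hooks above (the fallback value here is illustrative):
import os
os.environ.setdefault("WORKER_FREE_PORTS", "30000 30001 30002")
manager = PortPoolManager()
port = manager.get_port()      # reserve one currently-bindable port from this worker's slice
# ... start a service on `port` ...
manager.return_used_ports()    # hand everything back, as ClickHouseCluster.__exit__ does below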
def retry_exception(num, delay, func, exception=Exception, *args, **kwargs):
"""
Retry `func()` up to `num` times if it throws.
@ -248,7 +294,7 @@ def check_rabbitmq_is_available(rabbitmq_id, cookie):
),
stdout=subprocess.PIPE,
)
p.communicate()
p.wait(timeout=60)
return p.returncode == 0
@ -716,62 +762,67 @@ class ClickHouseCluster:
.stop()
)
self.port_pool = PortPoolManager()
@property
def kafka_port(self):
if self._kafka_port:
return self._kafka_port
self._kafka_port = get_free_port()
self._kafka_port = self.port_pool.get_port()
return self._kafka_port
@property
def schema_registry_port(self):
if self._schema_registry_port:
return self._schema_registry_port
self._schema_registry_port = get_free_port()
self._schema_registry_port = self.port_pool.get_port()
return self._schema_registry_port
@property
def schema_registry_auth_port(self):
if self._schema_registry_auth_port:
return self._schema_registry_auth_port
self._schema_registry_auth_port = get_free_port()
self._schema_registry_auth_port = self.port_pool.get_port()
return self._schema_registry_auth_port
@property
def kerberized_kafka_port(self):
if self._kerberized_kafka_port:
return self._kerberized_kafka_port
self._kerberized_kafka_port = get_free_port()
self._kerberized_kafka_port = self.port_pool.get_port()
return self._kerberized_kafka_port
@property
def azurite_port(self):
if self._azurite_port:
return self._azurite_port
self._azurite_port = get_free_port()
self._azurite_port = self.port_pool.get_port()
return self._azurite_port
@property
def mongo_port(self):
if self._mongo_port:
return self._mongo_port
self._mongo_port = get_free_port()
self._mongo_port = self.port_pool.get_port()
return self._mongo_port
@property
def mongo_no_cred_port(self):
if self._mongo_no_cred_port:
return self._mongo_no_cred_port
self._mongo_no_cred_port = get_free_port()
self._mongo_no_cred_port = self.port_pool.get_port()
return self._mongo_no_cred_port
@property
def redis_port(self):
if self._redis_port:
return self._redis_port
self._redis_port = get_free_port()
self._redis_port = self.port_pool.get_port()
return self._redis_port
def __exit__(self, exc_type, exc_val, exc_tb):
self.port_pool.return_used_ports()
def print_all_docker_pieces(self):
res_networks = subprocess.check_output(
f"docker network ls --filter name='{self.project_name}*'",

View File

@ -4,6 +4,8 @@ import logging
import pytest
import os
import minio
import random
import string
from helpers.cluster import ClickHouseCluster
from helpers.mock_servers import start_s3_mock
@ -45,6 +47,11 @@ def cluster():
cluster.shutdown()
def randomize_query_id(query_id, random_suffix_length=10):
letters = string.ascii_letters + string.digits
return f"{query_id}_{''.join(random.choice(letters) for _ in range(random_suffix_length))}"
@pytest.fixture(scope="module")
def init_broken_s3(cluster):
yield start_s3_mock(cluster, "broken_s3", "8083")
@ -61,6 +68,7 @@ def test_upload_after_check_works(cluster, broken_s3):
node.query(
"""
DROP TABLE IF EXISTS s3_upload_after_check_works;
CREATE TABLE s3_upload_after_check_works (
id Int64,
data String
@ -127,7 +135,9 @@ def test_upload_s3_fail_create_multi_part_upload(cluster, broken_s3, compression
broken_s3.setup_at_create_multi_part_upload()
insert_query_id = f"INSERT_INTO_TABLE_FUNCTION_FAIL_CREATE_MPU_{compression}"
insert_query_id = randomize_query_id(
f"INSERT_INTO_TABLE_FUNCTION_FAIL_CREATE_MPU_{compression}"
)
error = node.query_and_get_error(
f"""
INSERT INTO
@ -169,7 +179,9 @@ def test_upload_s3_fail_upload_part_when_multi_part_upload(
broken_s3.setup_fake_multpartuploads()
broken_s3.setup_at_part_upload(count=1, after=2)
insert_query_id = f"INSERT_INTO_TABLE_FUNCTION_FAIL_UPLOAD_PART_{compression}"
insert_query_id = randomize_query_id(
f"INSERT_INTO_TABLE_FUNCTION_FAIL_UPLOAD_PART_{compression}"
)
error = node.query_and_get_error(
f"""
INSERT INTO
@ -221,7 +233,7 @@ def test_when_error_is_retried(cluster, broken_s3, action_and_message):
broken_s3.setup_fake_multpartuploads()
broken_s3.setup_at_part_upload(count=3, after=2, action=action)
insert_query_id = f"INSERT_INTO_TABLE_{action}_RETRIED"
insert_query_id = randomize_query_id(f"INSERT_INTO_TABLE_{action}_RETRIED")
node.query(
f"""
INSERT INTO
@ -250,7 +262,7 @@ def test_when_error_is_retried(cluster, broken_s3, action_and_message):
assert s3_errors == 3
broken_s3.setup_at_part_upload(count=1000, after=2, action=action)
insert_query_id = f"INSERT_INTO_TABLE_{action}_RETRIED_1"
insert_query_id = randomize_query_id(f"INSERT_INTO_TABLE_{action}_RETRIED_1")
error = node.query_and_get_error(
f"""
INSERT INTO
@ -285,7 +297,7 @@ def test_when_s3_broken_pipe_at_upload_is_retried(cluster, broken_s3):
action="broken_pipe",
)
insert_query_id = f"TEST_WHEN_S3_BROKEN_PIPE_AT_UPLOAD"
insert_query_id = randomize_query_id(f"TEST_WHEN_S3_BROKEN_PIPE_AT_UPLOAD")
node.query(
f"""
INSERT INTO
@ -319,7 +331,7 @@ def test_when_s3_broken_pipe_at_upload_is_retried(cluster, broken_s3):
after=2,
action="broken_pipe",
)
insert_query_id = f"TEST_WHEN_S3_BROKEN_PIPE_AT_UPLOAD_1"
insert_query_id = randomize_query_id(f"TEST_WHEN_S3_BROKEN_PIPE_AT_UPLOAD_1")
error = node.query_and_get_error(
f"""
INSERT INTO
@ -361,7 +373,7 @@ def test_when_s3_connection_reset_by_peer_at_upload_is_retried(
action_args=["1"] if send_something else ["0"],
)
insert_query_id = (
insert_query_id = randomize_query_id(
f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_UPLOAD_{send_something}"
)
node.query(
@ -398,7 +410,7 @@ def test_when_s3_connection_reset_by_peer_at_upload_is_retried(
action="connection_reset_by_peer",
action_args=["1"] if send_something else ["0"],
)
insert_query_id = (
insert_query_id = randomize_query_id(
f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_UPLOAD_{send_something}_1"
)
error = node.query_and_get_error(
@ -443,7 +455,7 @@ def test_when_s3_connection_reset_by_peer_at_create_mpu_retried(
action_args=["1"] if send_something else ["0"],
)
insert_query_id = (
insert_query_id = randomize_query_id(
f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_MULTIPARTUPLOAD_{send_something}"
)
node.query(
@ -481,7 +493,7 @@ def test_when_s3_connection_reset_by_peer_at_create_mpu_retried(
action_args=["1"] if send_something else ["0"],
)
insert_query_id = (
insert_query_id = randomize_query_id(
f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_MULTIPARTUPLOAD_{send_something}_1"
)
error = node.query_and_get_error(
@ -521,7 +533,7 @@ def test_query_is_canceled_with_inf_retries(cluster, broken_s3):
action="connection_refused",
)
insert_query_id = f"TEST_QUERY_IS_CANCELED_WITH_INF_RETRIES"
insert_query_id = randomize_query_id(f"TEST_QUERY_IS_CANCELED_WITH_INF_RETRIES")
request = node.get_query_request(
f"""
INSERT INTO
@ -579,7 +591,7 @@ def test_adaptive_timeouts(cluster, broken_s3, node_name):
count=1000000,
)
insert_query_id = f"TEST_ADAPTIVE_TIMEOUTS_{node_name}"
insert_query_id = randomize_query_id(f"TEST_ADAPTIVE_TIMEOUTS_{node_name}")
node.query(
f"""
INSERT INTO
@ -631,6 +643,7 @@ def test_no_key_found_disk(cluster, broken_s3):
node.query(
"""
DROP TABLE IF EXISTS no_key_found_disk;
CREATE TABLE no_key_found_disk (
id Int64
) ENGINE=MergeTree()
@ -689,3 +702,15 @@ def test_no_key_found_disk(cluster, broken_s3):
"DB::Exception: The specified key does not exist. This error happened for S3 disk."
in error
)
s3_disk_no_key_errors_metric_value = int(
node.query(
"""
SELECT value
FROM system.metrics
WHERE metric = 'S3DiskNoKeyErrors'
"""
).strip()
)
assert s3_disk_no_key_errors_metric_value > 0

View File

@ -141,6 +141,9 @@ def test_drop_if_exists():
def test_replication():
node1.query("CREATE FUNCTION f2 AS (x, y) -> x - y")
node1.query(
"CREATE FUNCTION f3 AS () -> (SELECT sum(s) FROM (SELECT 1 as s UNION ALL SELECT 1 as s))"
)
assert (
node1.query("SELECT create_query FROM system.functions WHERE name='f2'")
@ -154,7 +157,11 @@ def test_replication():
assert node1.query("SELECT f2(12,3)") == "9\n"
assert node2.query("SELECT f2(12,3)") == "9\n"
assert node1.query("SELECT f3()") == "2\n"
assert node2.query("SELECT f3()") == "2\n"
node1.query("DROP FUNCTION f2")
node1.query("DROP FUNCTION f3")
assert (
node1.query("SELECT create_query FROM system.functions WHERE name='f2'") == ""
)
@ -214,7 +221,9 @@ def test_reload_zookeeper():
)
# config reloads, but can still work
node1.query("CREATE FUNCTION f2 AS (x, y) -> x - y")
node1.query(
"CREATE FUNCTION f2 AS () -> (SELECT sum(s) FROM (SELECT 1 as s UNION ALL SELECT 1 as s))"
)
assert_eq_with_retry(
node2,
"SELECT name FROM system.functions WHERE name IN ['f1', 'f2'] ORDER BY name",
@ -269,7 +278,7 @@ def test_reload_zookeeper():
TSV(["f1", "f2", "f3"]),
)
assert node2.query("SELECT f1(12, 3), f2(12, 3), f3(12, 3)") == TSV([[15, 9, 4]])
assert node2.query("SELECT f1(12, 3), f2(), f3(12, 3)") == TSV([[15, 2, 4]])
active_zk_connections = get_active_zk_connections()
assert (
@ -307,3 +316,13 @@ def test_start_without_zookeeper():
"CREATE FUNCTION f1 AS (x, y) -> (x + y)\n",
)
node1.query("DROP FUNCTION f1")
def test_server_restart():
node1.query(
"CREATE FUNCTION f1 AS () -> (SELECT sum(s) FROM (SELECT 1 as s UNION ALL SELECT 1 as s))"
)
assert node1.query("SELECT f1()") == "2\n"
node1.restart_clickhouse()
assert node1.query("SELECT f1()") == "2\n"
node1.query("DROP FUNCTION f1")

View File

@ -8,6 +8,8 @@ import os
import json
import time
import glob
import random
import string
import pyspark
import delta
@ -52,6 +54,11 @@ def get_spark():
return builder.master("local").getOrCreate()
def randomize_table_name(table_name, random_suffix_length=10):
letters = string.ascii_letters + string.digits
return f"{table_name}{''.join(random.choice(letters) for _ in range(random_suffix_length))}"
@pytest.fixture(scope="module")
def started_cluster():
try:
@ -151,7 +158,7 @@ def test_single_log_file(started_cluster):
spark = started_cluster.spark_session
minio_client = started_cluster.minio_client
bucket = started_cluster.minio_bucket
TABLE_NAME = "test_single_log_file"
TABLE_NAME = randomize_table_name("test_single_log_file")
inserted_data = "SELECT number as a, toString(number + 1) as b FROM numbers(100)"
parquet_data_path = create_initial_data_file(
@ -175,7 +182,7 @@ def test_partition_by(started_cluster):
spark = started_cluster.spark_session
minio_client = started_cluster.minio_client
bucket = started_cluster.minio_bucket
TABLE_NAME = "test_partition_by"
TABLE_NAME = randomize_table_name("test_partition_by")
write_delta_from_df(
spark,
@ -197,7 +204,7 @@ def test_checkpoint(started_cluster):
spark = started_cluster.spark_session
minio_client = started_cluster.minio_client
bucket = started_cluster.minio_bucket
TABLE_NAME = "test_checkpoint"
TABLE_NAME = randomize_table_name("test_checkpoint")
write_delta_from_df(
spark,
@ -272,7 +279,7 @@ def test_multiple_log_files(started_cluster):
spark = started_cluster.spark_session
minio_client = started_cluster.minio_client
bucket = started_cluster.minio_bucket
TABLE_NAME = "test_multiple_log_files"
TABLE_NAME = randomize_table_name("test_multiple_log_files")
write_delta_from_df(
spark, generate_data(spark, 0, 100), f"/{TABLE_NAME}", mode="overwrite"
@ -310,7 +317,7 @@ def test_metadata(started_cluster):
spark = started_cluster.spark_session
minio_client = started_cluster.minio_client
bucket = started_cluster.minio_bucket
TABLE_NAME = "test_metadata"
TABLE_NAME = randomize_table_name("test_metadata")
parquet_data_path = create_initial_data_file(
started_cluster,
@ -339,9 +346,9 @@ def test_metadata(started_cluster):
def test_types(started_cluster):
TABLE_NAME = "test_types"
TABLE_NAME = randomize_table_name("test_types")
spark = started_cluster.spark_session
result_file = f"{TABLE_NAME}_result_2"
result_file = randomize_table_name(f"{TABLE_NAME}_result_2")
delta_table = (
DeltaTable.create(spark)
@ -415,7 +422,7 @@ def test_restart_broken(started_cluster):
spark = started_cluster.spark_session
minio_client = started_cluster.minio_client
bucket = "broken"
TABLE_NAME = "test_restart_broken"
TABLE_NAME = randomize_table_name("test_restart_broken")
if not minio_client.bucket_exists(bucket):
minio_client.make_bucket(bucket)
@ -452,6 +459,18 @@ def test_restart_broken(started_cluster):
f"SELECT count() FROM {TABLE_NAME}"
)
s3_disk_no_key_errors_metric_value = int(
instance.query(
"""
SELECT value
FROM system.metrics
WHERE metric = 'S3DiskNoKeyErrors'
"""
).strip()
)
assert s3_disk_no_key_errors_metric_value == 0
minio_client.make_bucket(bucket)
upload_directory(minio_client, bucket, f"/{TABLE_NAME}", "")
@ -464,7 +483,7 @@ def test_restart_broken_table_function(started_cluster):
spark = started_cluster.spark_session
minio_client = started_cluster.minio_client
bucket = "broken2"
TABLE_NAME = "test_restart_broken_table_function"
TABLE_NAME = randomize_table_name("test_restart_broken_table_function")
if not minio_client.bucket_exists(bucket):
minio_client.make_bucket(bucket)
@ -518,7 +537,7 @@ def test_partition_columns(started_cluster):
spark = started_cluster.spark_session
minio_client = started_cluster.minio_client
bucket = started_cluster.minio_bucket
TABLE_NAME = "test_partition_columns"
TABLE_NAME = randomize_table_name("test_partition_columns")
result_file = f"{TABLE_NAME}"
partition_columns = ["b", "c", "d", "e"]

View File

@ -10,9 +10,9 @@
<type>cache</type>
<disk>local_disk</disk>
<path>/tiny_local_cache/</path>
<max_size>10M</max_size>
<max_file_segment_size>1M</max_file_segment_size>
<boundary_alignment>1M</boundary_alignment>
<max_size>12M</max_size>
<max_file_segment_size>100K</max_file_segment_size>
<boundary_alignment>100K</boundary_alignment>
<cache_on_write_operations>1</cache_on_write_operations>
</tiny_local_cache>

View File

@ -7,6 +7,9 @@ import fnmatch
from helpers.cluster import ClickHouseCluster
from helpers.client import QueryRuntimeException
MB = 1024 * 1024
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
@ -36,15 +39,30 @@ def test_cache_evicted_by_temporary_data(start_cluster):
q("SELECT sum(size) FROM system.filesystem_cache").strip()
)
assert get_cache_size() == 0
dump_debug_info = lambda: "\n".join(
[
">>> filesystem_cache <<<",
q("SELECT * FROM system.filesystem_cache FORMAT Vertical"),
">>> remote_data_paths <<<",
q("SELECT * FROM system.remote_data_paths FORMAT Vertical"),
">>> tiny_local_cache_local_disk <<<",
q(
"SELECT * FROM system.disks WHERE name = 'tiny_local_cache_local_disk' FORMAT Vertical"
),
]
)
assert get_free_space() > 8 * 1024 * 1024
q("SYSTEM DROP FILESYSTEM CACHE")
q("DROP TABLE IF EXISTS t1 SYNC")
assert get_cache_size() == 0, dump_debug_info()
assert get_free_space() > 8 * MB, dump_debug_info()
# Codec is NONE to make cache size predictable
q(
"CREATE TABLE t1 (x UInt64 CODEC(NONE), y UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY x SETTINGS storage_policy = 'tiny_local_cache'"
"CREATE TABLE t1 (x UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY x SETTINGS storage_policy = 'tiny_local_cache'"
)
q("INSERT INTO t1 SELECT number, number FROM numbers(1024 * 1024)")
q("INSERT INTO t1 SELECT number FROM numbers(1024 * 1024)")
# To be sure that nothing is reading the cache and entries for t1 can be evicted
q("OPTIMIZE TABLE t1 FINAL")
@ -54,11 +72,11 @@ def test_cache_evicted_by_temporary_data(start_cluster):
q("SELECT sum(x) FROM t1")
cache_size_with_t1 = get_cache_size()
assert cache_size_with_t1 > 8 * 1024 * 1024
assert cache_size_with_t1 > 8 * MB, dump_debug_info()
# Almost all disk space is occupied by t1 cache
free_space_with_t1 = get_free_space()
assert free_space_with_t1 < 4 * 1024 * 1024
assert free_space_with_t1 < 4 * MB, dump_debug_info()
# Try to sort the table, but fail because of lack of disk space
with pytest.raises(QueryRuntimeException) as exc:
@ -76,31 +94,27 @@ def test_cache_evicted_by_temporary_data(start_cluster):
# Some data evicted from cache by temporary data
cache_size_after_eviction = get_cache_size()
assert cache_size_after_eviction < cache_size_with_t1
assert cache_size_after_eviction < cache_size_with_t1, dump_debug_info()
# Disk space freed, at least 3 MB, because temporary data tried to write 4 MB
assert get_free_space() > free_space_with_t1 + 3 * 1024 * 1024
assert get_free_space() > free_space_with_t1 + 3 * MB, dump_debug_info()
# Read some data to fill the cache again
q("SELECT avg(y) FROM t1")
q("SELECT avg(x) FROM t1")
cache_size_with_t1 = get_cache_size()
assert cache_size_with_t1 > 8 * 1024 * 1024, q(
"SELECT * FROM system.filesystem_cache FORMAT Vertical"
)
assert cache_size_with_t1 > 8 * MB, dump_debug_info()
# Almost all disk space is occupied by t1 cache
free_space_with_t1 = get_free_space()
assert free_space_with_t1 < 4 * 1024 * 1024, q(
"SELECT * FROM system.disks WHERE name = 'tiny_local_cache_local_disk' FORMAT Vertical"
)
assert free_space_with_t1 < 4 * MB, dump_debug_info()
node.http_query(
"SELECT randomPrintableASCII(1024) FROM numbers(8 * 1024) FORMAT TSV",
params={"buffer_size": 0, "wait_end_of_query": 1},
)
assert get_free_space() > free_space_with_t1 + 3 * 1024 * 1024
assert get_free_space() > free_space_with_t1 + 3 * MB, dump_debug_info()
# not enough space for buffering 32 MB
with pytest.raises(Exception) as exc:
@ -112,4 +126,4 @@ def test_cache_evicted_by_temporary_data(start_cluster):
str(exc.value), "*Failed to reserve * for temporary file*"
), exc.value
q("DROP TABLE IF EXISTS t1")
q("DROP TABLE IF EXISTS t1 SYNC")

View File

@ -18,20 +18,25 @@ def started_cluster():
def test_persistence():
create_function_query1 = "CREATE FUNCTION MySum1 AS (a, b) -> a + b"
create_function_query2 = "CREATE FUNCTION MySum2 AS (a, b) -> MySum1(a, b) + b"
create_function_query3 = "CREATE FUNCTION MyUnion AS () -> (SELECT sum(s) FROM (SELECT 1 as s UNION ALL SELECT 1 as s))"
instance.query(create_function_query1)
instance.query(create_function_query2)
instance.query(create_function_query3)
assert instance.query("SELECT MySum1(1,2)") == "3\n"
assert instance.query("SELECT MySum2(1,2)") == "5\n"
assert instance.query("SELECT MyUnion()") == "2\n"
instance.restart_clickhouse()
assert instance.query("SELECT MySum1(1,2)") == "3\n"
assert instance.query("SELECT MySum2(1,2)") == "5\n"
assert instance.query("SELECT MyUnion()") == "2\n"
instance.query("DROP FUNCTION MySum2")
instance.query("DROP FUNCTION MySum1")
instance.query("DROP FUNCTION MyUnion")
instance.restart_clickhouse()
@ -48,3 +53,10 @@ def test_persistence():
or "Function with name 'MySum2' does not exist. In scope SELECT MySum2(1, 2)"
in error_message
)
error_message = instance.query_and_get_error("SELECT MyUnion()")
assert (
"Unknown function MyUnion" in error_message
or "Function with name 'MyUnion' does not exist. In scope SELECT MyUnion"
in error_message
)

View File

@ -1,11 +1,8 @@
<test>
<query>SELECT arrayReduce('count', range(100000000))</query>
<query>SELECT arrayReduce('sum', range(100000000))</query>
<query>SELECT arrayReduceInRanges('count', [(1, 100000000)], range(100000000))</query>
<query>SELECT arrayReduceInRanges('sum', [(1, 100000000)], range(100000000))</query>
<query>SELECT arrayReduceInRanges('count', arrayZip(range(1000000), range(1000000)), range(100000000))[123456]</query>
<query>SELECT arrayReduceInRanges('sum', arrayZip(range(1000000), range(1000000)), range(100000000))[123456]</query>
<query>SELECT arrayReduce('count', range(1000000)) FROM numbers_mt(500000000) format Null</query>
<query>SELECT arrayReduce('sum', range(1000000)) FROM numbers_mt(500000000) format Null</query>
<query>SELECT arrayReduceInRanges('count', [(1, 1000000)], range(1000000)) FROM numbers_mt(500000000) format Null</query>
<query>SELECT arrayReduceInRanges('sum', [(1, 1000000)], range(1000000)) FROM numbers_mt(500000000) format Null</query>
<query>SELECT arrayReduceInRanges('count', arrayZip(range(1000000), range(1000000)), range(1000000))[123456]</query>
<query>SELECT arrayReduceInRanges('sum', arrayZip(range(1000000), range(1000000)), range(1000000))[123456]</query>
</test>

View File

@ -10,8 +10,8 @@
PARTITION BY toYYYYMM(d) ORDER BY key
</create_query>
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers(2500000)</fill_query>
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers(2500000)</fill_query>
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers(1000000)</fill_query>
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers(1000000)</fill_query>
<query>SELECT * FROM optimized_select_final FINAL FORMAT Null SETTINGS max_threads = 8</query>
<query>SELECT * FROM optimized_select_final FINAL WHERE key % 10 = 0 FORMAT Null</query>

View File

@ -5,15 +5,16 @@
ORDER BY (c1, c2)
SETTINGS min_rows_for_wide_part = 1000000000 AS
SELECT *
FROM generateRandom('c1 UInt32, c2 UInt64, s1 String, arr1 Array(UInt32), c3 UInt64, s2 String', 0, 30, 30)
FROM generateRandom('c1 UInt32, c2 UInt64, s1 String, arr1 Array(UInt32), c3 UInt64, s2 String', 0, 5, 6)
LIMIT 50000000
SETTINGS max_insert_threads = 8
</create_query>
<settings>
<max_threads>8</max_threads>
</settings>
<query short="1">SELECT count() FROM mt_comp_parts WHERE NOT ignore(c1)</query>
<query short="1">SELECT count() FROM mt_comp_parts WHERE NOT ignore(s1)</query>
<query>SELECT count() FROM mt_comp_parts WHERE NOT ignore(c2, s1, arr1, s2)</query>
<query>SELECT count() FROM mt_comp_parts WHERE NOT ignore(c1, s1, c3)</query>
<query>SELECT count() FROM mt_comp_parts WHERE NOT ignore(c1, c2, c3)</query>

View File

@ -118,7 +118,7 @@ then
# far in the future and have unrelated test changes.
base=$(git -C right/ch merge-base pr origin/master)
git -C right/ch diff --name-only "$base" pr -- . | tee all-changed-files.txt
git -C right/ch diff --name-only "$base" pr -- tests/performance/*.xml | tee changed-test-definitions.txt
git -C right/ch diff --name-only --diff-filter=d "$base" pr -- tests/performance/*.xml | tee changed-test-definitions.txt
git -C right/ch diff --name-only "$base" pr -- :!tests/performance/*.xml :!docker/test/performance-comparison | tee other-changed-files.txt
fi

View File

@ -345,6 +345,16 @@ for query_index in queries_to_run:
print(f"display-name\t{query_index}\t{tsv_escape(query_display_name)}")
for conn_index, c in enumerate(all_connections):
try:
c.execute("SYSTEM JEMALLOC PURGE")
print(f"purging jemalloc arenas\t{conn_index}\t{c.last_query.elapsed}")
except KeyboardInterrupt:
raise
except:
continue
# Prewarm: run once on both servers. Helps to bring the data into memory,
# precompile the queries, etc.
# A query might not run on the old server if it uses a function added in the

View File

@ -1,11 +0,0 @@
<!-- https://github.com/ClickHouse/ClickHouse/issues/37900 -->
<test>
<create_query>create table views_max_insert_threads_null (a UInt64) Engine = Null</create_query>
<create_query>create materialized view views_max_insert_threads_mv Engine = Null AS select now() as ts, max(a) from views_max_insert_threads_null group by ts</create_query>
<query>insert into views_max_insert_threads_null select * from numbers_mt(3000000000) settings max_threads = 16, max_insert_threads=16</query>
<drop_query>drop table if exists views_max_insert_threads_null</drop_query>
<drop_query>drop table if exists views_max_insert_threads_mv</drop_query>
</test>

View File

@ -8,13 +8,13 @@
40
41
0
41
2 42
2 42
43
0
43
11
11

View File

@ -8,6 +8,18 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e -o pipefail
# Wait until the dictionary updates the value for 13 on its own:
function wait_for_dict_update()
{
for ((i = 0; i < 100; ++i)); do
if [ "$(${CLICKHOUSE_CLIENT} --query "SELECT dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(13))")" != -1 ]; then
return 0
fi
sleep 0.5
done
return 1
}
$CLICKHOUSE_CLIENT <<EOF
CREATE TABLE ${CLICKHOUSE_DATABASE}.table(x Int64, y Int64, insert_time DateTime) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO ${CLICKHOUSE_DATABASE}.table VALUES (12, 102, now());
@ -19,7 +31,7 @@ CREATE DICTIONARY ${CLICKHOUSE_DATABASE}.dict
insert_time DateTime
)
PRIMARY KEY x
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table' DB '${CLICKHOUSE_DATABASE}' UPDATE_FIELD 'insert_time'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table' DB '${CLICKHOUSE_DATABASE}' UPDATE_FIELD 'insert_time' UPDATE_LAG 60))
LAYOUT(FLAT())
LIFETIME(1);
EOF
@ -29,11 +41,10 @@ $CLICKHOUSE_CLIENT --query "SELECT '12 -> ', dictGetInt64('${CLICKHOUSE_DATABASE
$CLICKHOUSE_CLIENT --query "INSERT INTO ${CLICKHOUSE_DATABASE}.table VALUES (13, 103, now())"
$CLICKHOUSE_CLIENT --query "INSERT INTO ${CLICKHOUSE_DATABASE}.table VALUES (14, 104, now() - INTERVAL 1 DAY)"
# Wait when the dictionary will update the value for 13 on its own:
while [ "$(${CLICKHOUSE_CLIENT} --query "SELECT dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(13))")" = -1 ]
do
sleep 0.5
done
if ! wait_for_dict_update; then
echo "Dictionary had not been reloaded" >&2
exit 1
fi
$CLICKHOUSE_CLIENT --query "SELECT '13 -> ', dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(13))"

View File

@ -1,4 +1,4 @@
-- Tags: no-random-merge-tree-settings, no-tsan, no-debug, no-object-storage
-- Tags: no-random-merge-tree-settings, no-random-settings, no-tsan, no-debug, no-object-storage, long
-- no-tsan: too slow
-- no-object-storage: for remote tables we use thread pool even when reading with one stream, so memory consumption is higher
@ -16,7 +16,7 @@ CREATE TABLE adaptive_table(
value String
) ENGINE MergeTree()
ORDER BY key
SETTINGS index_granularity_bytes=1048576,
SETTINGS index_granularity_bytes = 1048576,
min_bytes_for_wide_part = 0,
min_rows_for_wide_part = 0,
enable_vertical_merge_algorithm = 0;

View File

@ -64,7 +64,7 @@ toStartOfMonth;toDateTime64;false 2099-07-07
type;toStartOfMonth;toDateTime64;false Date
toStartOfWeek;toDate32;false 2099-07-07
type;toStartOfWeek;toDate32;false Date
toStartOfWeek;toDateTime64;false 2099-07-07
toStartOfWeek;toDateTime64;false 1970-01-01
type;toStartOfWeek;toDateTime64;false Date
toMonday;toDate32;false 2099-07-08
type;toMonday;toDate32;false Date

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
export MY_CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT --async_insert_busy_timeout_ms 10 --async_insert_max_data_size 1 --async_insert 1"
export MY_CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT --async_insert_busy_timeout_min_ms 50 --async_insert_busy_timeout_max_ms 50 --async_insert 1"
function insert1()
{
@ -29,11 +29,8 @@ function insert3()
{
local TIMELIMIT=$((SECONDS+$1))
while [ $SECONDS -lt "$TIMELIMIT" ]; do
${MY_CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --wait_for_async_insert 1 -q "INSERT INTO async_inserts_race VALUES (7, 'g') (8, 'h')" &
sleep 0.05
${MY_CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --wait_for_async_insert 1 -q "INSERT INTO async_inserts_race VALUES (7, 'g') (8, 'h')"
done
wait
}
function select1()

Some files were not shown because too many files have changed in this diff