Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-27 18:12:02 +00:00

Merge branch 'master' into fix-idiotic-code

This commit is contained in: commit 0ebbe25c07
173  .github/actions/release/action.yml  (vendored, new file)
@@ -0,0 +1,173 @@
name: Release
description: Makes patch releases and creates new release branch

inputs:
  ref:
    description: 'Git reference (branch or commit sha) from which to create the release'
    required: true
    type: string
  type:
    description: 'The type of release: "new" for a new release or "patch" for a patch release'
    required: true
    type: choice
    options:
      - patch
      - new
  dry-run:
    description: 'Dry run'
    required: false
    default: true
    type: boolean
  token:
    required: true
    type: string

runs:
  using: "composite"
  steps:
    - name: Prepare Release Info
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --prepare-release-info \
          --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
          ${{ inputs.dry-run && '--dry-run' || '' }}
        echo "::group::Release Info"
        python3 -m json.tool /tmp/release_info.json
        echo "::endgroup::"
        release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
        commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
        echo "Release Tag: $release_tag"
        echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
        echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
    - name: Download All Release Artifacts
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push Git Tag for the Release
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push New Release Branch
      if: ${{ inputs.type == 'new' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump CH Version and Update Contributors' List
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump Docker versions, Changelog, Security
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-started --progress "update ChangeLog"
        [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
        echo "List versions"
        ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
        echo "Update docker version"
        ./utils/list-versions/update-docker-version.sh
        echo "Generate ChangeLog"
        export CI=1
        docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
          --volume=".:/ClickHouse" clickhouse/style-test \
          /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
          --gh-user-or-token=${{ inputs.token }} --jobs=5 \
          --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
        git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
        echo "Generate Security"
        python3 ./utils/security-generator/generate_security.py > SECURITY.md
        git diff HEAD
    - name: Create ChangeLog PR
      if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
      uses: peter-evans/create-pull-request@v6
      with:
        author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        token: ${{ inputs.token }}
        committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
        branch: auto/${{ env.RELEASE_TAG }}
        assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
        delete-branch: true
        title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
        labels: do not test
        body: |
          Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}

          ### Changelog category (leave one):
          - Not for changelog (changelog entry is not required)
    - name: Reset changes if Dry-run
      if: ${{ inputs.dry-run }}
      shell: bash
      run: |
        git reset --hard HEAD
    - name: Checkout back to GITHUB_REF
      shell: bash
      run: |
        git checkout "$GITHUB_REF_NAME"
        # set current progress to OK
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Create GH Release
      shell: bash
      if: ${{ inputs.type == 'patch' }}
      run: |
        python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Docker clickhouse/clickhouse-server building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-started --progress "docker server release"
        cd "./tests/ci"
        export CHECK_NAME="Docker server image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Docker clickhouse/clickhouse-keeper building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-started --progress "docker keeper release"
        cd "./tests/ci"
        export CHECK_NAME="Docker keeper image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Set Release progress completed
      shell: bash
      run: |
        # If we here - set completed status, to post proper Slack OK or FAIL message in the next step
        python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Post Slack Message
      if: ${{ !cancelled() }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }}
96  .github/workflows/auto_release.yml  (vendored)
@@ -1,44 +1,110 @@
name: AutoRelease

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  DRY_RUN: true

concurrency:
  group: auto-release
  group: release

on: # yamllint disable-line rule:truthy
  # schedule:
  #   - cron: '0 10-16 * * 1-5'
  # Workflow uses a test bucket for packages and dry run mode (no real releases)
  schedule:
    - cron: '0 9 * * *'
    - cron: '0 15 * * *'
  workflow_dispatch:
    inputs:
      dry-run:
        description: 'Dry run'
        required: false
        default: true
        type: boolean

jobs:
  CherryPick:
    runs-on: [self-hosted, style-checker-aarch64]
  AutoRelease:
    runs-on: [self-hosted, release-maker]
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/cherry_pick
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          REPO_OWNER=ClickHouse
          REPO_NAME=ClickHouse
          REPO_TEAM=core
          EOF
      - name: Set DRY_RUN for schedule
        if: ${{ github.event_name == 'schedule' }}
        run: echo "DRY_RUN=true" >> "$GITHUB_ENV"
      - name: Set DRY_RUN for dispatch
        if: ${{ github.event_name == 'workflow_dispatch' }}
        run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV"
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Auto-release
      - name: Auto Release Prepare
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --release-after-days=3
          python3 auto_release.py --prepare
          echo "::group::Auto Release Info"
          python3 -m json.tool /tmp/autorelease_info.json
          echo "::endgroup::"
          {
            echo 'AUTO_RELEASE_PARAMS<<EOF'
            cat /tmp/autorelease_info.json
            echo 'EOF'
          } >> "$GITHUB_ENV"
      - name: Post Release Branch statuses
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --post-status
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Post Slack Message
        if: ${{ !cancelled() }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
      - name: Clean up
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
132  .github/workflows/create_release.yml  (vendored)
@@ -2,7 +2,6 @@ name: CreateRelease

concurrency:
  group: release

'on':
  workflow_dispatch:
    inputs:
@@ -31,136 +30,15 @@ jobs:
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          RELEASE_INFO_FILE=${{ runner.temp }}/release_info.json
          EOF
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Prepare Release Info
        run: |
          python3 ./tests/ci/create_release.py --prepare-release-info \
            --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
            --outfile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
          echo "::group::Release Info"
          python3 -m json.tool "$RELEASE_INFO_FILE"
          echo "::endgroup::"
          release_tag=$(jq -r '.release_tag' "$RELEASE_INFO_FILE")
          commit_sha=$(jq -r '.commit_sha' "$RELEASE_INFO_FILE")
          echo "Release Tag: $release_tag"
          echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
          echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
      - name: Download All Release Artifacts
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Push Git Tag for the Release
        run: |
          python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Push New Release Branch
        if: ${{ inputs.type == 'new' }}
        run: |
          python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Bump CH Version and Update Contributors' List
        run: |
          python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Checkout master
        run: |
          git checkout master
      - name: Bump Docker versions, Changelog, Security
        if: ${{ inputs.type == 'patch' }}
        run: |
          [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
          echo "List versions"
          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
          echo "Update docker version"
          ./utils/list-versions/update-docker-version.sh
          echo "Generate ChangeLog"
          export CI=1
          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
            --volume=".:/ClickHouse" clickhouse/style-test \
            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
            --gh-user-or-token="$GH_TOKEN" --jobs=5 \
            --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
          git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
          echo "Generate Security"
          python3 ./utils/security-generator/generate_security.py > SECURITY.md
          git diff HEAD
      - name: Create ChangeLog PR
        if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
        uses: peter-evans/create-pull-request@v6
      - name: Call Release Action
        uses: ./.github/actions/release
        with:
          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
          ref: ${{ inputs.ref }}
          type: ${{ inputs.type }}
          dry-run: ${{ inputs.dry-run }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
          commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
          branch: auto/${{ env.RELEASE_TAG }}
          assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
          delete-branch: true
          title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
          labels: do not test
          body: |
            Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}

            ### Changelog category (leave one):
            - Not for changelog (changelog entry is not required)
      - name: Reset changes if Dry-run
        if: ${{ inputs.dry-run }}
        run: |
          git reset --hard HEAD
      - name: Checkout back to GITHUB_REF
        run: |
          git checkout "$GITHUB_REF_NAME"
      - name: Create GH Release
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/create_release.py --create-gh-release \
            --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}

      - name: Export TGZ Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Test TGZ Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Export RPM Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Test RPM Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Export Debian Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Test Debian Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Docker clickhouse/clickhouse-server building
        if: ${{ inputs.type == 'patch' }}
        run: |
          cd "./tests/ci"
          export CHECK_NAME="Docker server image"
          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
      - name: Docker clickhouse/clickhouse-keeper building
        if: ${{ inputs.type == 'patch' }}
        run: |
          cd "./tests/ci"
          export CHECK_NAME="Docker keeper image"
          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
      - name: Post Slack Message
        if: always()
        run: |
          echo Slack Message
@@ -6,8 +6,8 @@ source /setup_export_logs.sh
# fail on errors, verbose and export all env variables
set -e -x -a

MAX_RUN_TIME=${MAX_RUN_TIME:-7200}
MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))
MAX_RUN_TIME=${MAX_RUN_TIME:-9000}
MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 9000 : MAX_RUN_TIME))

USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
@@ -224,7 +224,7 @@ function run_tests()
    else
        # All other configurations are OK.
        ADDITIONAL_OPTIONS+=('--jobs')
        ADDITIONAL_OPTIONS+=('7')
        ADDITIONAL_OPTIONS+=('8')
    fi

    if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then
@@ -6,38 +6,38 @@ sidebar_label: Playground

# ClickHouse Playground {#clickhouse-playground}

[ClickHouse Playground](https://play.clickhouse.com/play?user=play) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster.
Several example datasets are available in Playground.
[ClickHouse Playground](https://play.clickhouse.com/play?user=play) позволяет пользователям экспериментировать с ClickHouse, выполняя запросы мгновенно, без необходимости настройки сервера или кластера.
В Playground доступны несколько примеров наборов данных.

You can make queries to Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. More information about software products that support ClickHouse is available [here](../interfaces/index.md).
Вы можете выполнять запросы к Playground, используя любой HTTP-клиент, например [curl](https://curl.haxx.se) или [wget](https://www.gnu.org/software/wget/), или настроить соединение, используя драйверы [JDBC](../interfaces/jdbc.md) или [ODBC](../interfaces/odbc.md). Дополнительную информацию о программных продуктах, поддерживающих ClickHouse, можно найти [здесь](../interfaces/index.md).

## Credentials {#credentials}
## Учетные данные {#credentials}

| Parameter | Value |
| Параметр | Значение |
|:--------------------|:-----------------------------------|
| HTTPS endpoint | `https://play.clickhouse.com:443/` |
| Native TCP endpoint | `play.clickhouse.com:9440` |
| User | `explorer` or `play` |
| Password | (empty) |
| HTTPS-адрес | `https://play.clickhouse.com:443/` |
| TCP-адрес | `play.clickhouse.com:9440` |
| Пользователь | `explorer` или `play` |
| Пароль | (пусто) |

## Limitations {#limitations}
## Ограничения {#limitations}

The queries are executed as a read-only user. It implies some limitations:
Запросы выполняются от имени пользователя с правами только на чтение. Это предполагает некоторые ограничения:

- DDL queries are not allowed
- INSERT queries are not allowed
- DDL-запросы не разрешены
- INSERT-запросы не разрешены

The service also have quotas on its usage.
Сервис также имеет квоты на использование.

## Examples {#examples}
## Примеры {#examples}

HTTPS endpoint example with `curl`:
Пример использования HTTPS-адреса с `curl`:

```bash
curl "https://play.clickhouse.com/?user=explorer" --data-binary "SELECT 'Play ClickHouse'"
```

TCP endpoint example with [CLI](../interfaces/cli.md):
Пример использования TCP-адреса с [CLI](../interfaces/cli.md):

``` bash
clickhouse client --secure --host play.clickhouse.com --user explorer
```
@@ -52,6 +52,10 @@
# include <Server/CertificateReloader.h>
#endif

#if USE_GWP_ASAN
# include <Common/GWPAsan.h>
#endif

#include <Server/ProtocolServerAdapter.h>
#include <Server/KeeperTCPHandlerFactory.h>

@@ -639,6 +643,10 @@ try
tryLogCurrentException(log, "Disabling cgroup memory observer because of an error during initialization");
}

#if USE_GWP_ASAN
GWPAsan::initFinished();
#endif

LOG_INFO(log, "Ready for connections.");

@@ -2213,6 +2213,7 @@ try
CannotAllocateThreadFaultInjector::setFaultProbability(server_settings.cannot_allocate_thread_fault_injection_probability);

#if USE_GWP_ASAN
GWPAsan::initFinished();
GWPAsan::setForceSampleProbability(server_settings.gwp_asan_force_sample_probability);
#endif
@@ -4124,7 +4124,9 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo

auto * column_to_interpolate = interpolate_node_typed.getExpression()->as<IdentifierNode>();
if (!column_to_interpolate)
throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for indentifiers, but {} is found",
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"INTERPOLATE can work only for identifiers, but {} is found",
interpolate_node_typed.getExpression()->formatASTForErrorMessage());
auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName();
@@ -419,9 +419,6 @@ dbms_target_link_libraries (
boost::circular_buffer
boost::heap)

target_include_directories(clickhouse_common_io PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/Core/include") # uses some includes from core
dbms_target_include_directories(PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/Core/include")

target_link_libraries(clickhouse_common_io PUBLIC
ch_contrib::miniselect
ch_contrib::pdqsort)
@@ -81,6 +81,10 @@
#include <Common/config_version.h>
#include "config.h"

#if USE_GWP_ASAN
# include <Common/GWPAsan.h>
#endif

namespace fs = std::filesystem;
using namespace std::literals;
@@ -3264,6 +3268,11 @@ void ClientBase::init(int argc, char ** argv)
fatal_log = createLogger("ClientBase", fatal_channel_ptr.get(), Poco::Message::PRIO_FATAL);
signal_listener = std::make_unique<SignalListener>(nullptr, fatal_log);
signal_listener_thread.start(*signal_listener);

#if USE_GWP_ASAN
GWPAsan::initFinished();
#endif

}

}
@@ -366,13 +366,10 @@ void ColumnAggregateFunction::updateHashWithValue(size_t n, SipHash & hash) cons
hash.update(wbuf.str().c_str(), wbuf.str().size());
}

void ColumnAggregateFunction::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnAggregateFunction::getWeakHash32() const
{
auto s = data.size();
if (hash.getData().size() != data.size())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", std::to_string(s), hash.getData().size());

WeakHash32 hash(s);
auto & hash_data = hash.getData();

std::vector<UInt8> v;
@@ -383,6 +380,8 @@ void ColumnAggregateFunction::updateWeakHash32(WeakHash32 & hash) const
wbuf.finalize();
hash_data[i] = ::updateWeakHash32(v.data(), v.size(), hash_data[i]);
}

return hash;
}

void ColumnAggregateFunction::updateHashFast(SipHash & hash) const

@@ -177,7 +177,7 @@ public:

void updateHashWithValue(size_t n, SipHash & hash) const override;

void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;

void updateHashFast(SipHash & hash) const override;
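The same mechanical migration repeats for every IColumn implementation in the hunks below: instead of filling a caller-allocated WeakHash32 in place, each column now builds and returns one, so the size checks against the incoming hash disappear. A minimal caller-side sketch of the change (not part of this commit; it uses only the IColumn and WeakHash32 interfaces shown in these hunks):

```cpp
#include <Columns/IColumn.h>
#include <Common/WeakHash.h>

using namespace DB;

void weakHashOfColumn(const IColumn & column)
{
    /// Old interface: the caller allocates a hash of the column's size and the
    /// column updates it in place (throwing LOGICAL_ERROR on a size mismatch).
    // WeakHash32 hash(column.size());
    // column.updateWeakHash32(hash);

    /// New interface: the column constructs and returns the hash itself,
    /// so a wrongly sized hash can no longer be passed in.
    WeakHash32 hash = column.getWeakHash32();
    (void)hash;
}
```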
@@ -271,15 +271,12 @@ void ColumnArray::updateHashWithValue(size_t n, SipHash & hash) const
getData().updateHashWithValue(offset + i, hash);
}

void ColumnArray::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnArray::getWeakHash32() const
{
auto s = offsets->size();
if (hash.getData().size() != s)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", s, hash.getData().size());
WeakHash32 hash(s);

WeakHash32 internal_hash(data->size());
data->updateWeakHash32(internal_hash);
WeakHash32 internal_hash = data->getWeakHash32();

Offset prev_offset = 0;
const auto & offsets_data = getOffsets();
@@ -300,6 +297,8 @@ void ColumnArray::updateWeakHash32(WeakHash32 & hash) const

prev_offset = offsets_data[i];
}

return hash;
}

void ColumnArray::updateHashFast(SipHash & hash) const

@@ -82,7 +82,7 @@
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char * pos) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;
void updateHashFast(SipHash & hash) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;

@@ -3,6 +3,7 @@
#include <optional>
#include <Core/Field.h>
#include <Columns/IColumn.h>
#include <Common/WeakHash.h>
#include <IO/BufferWithOwnMemory.h>

@@ -98,7 +99,7 @@
const char * deserializeAndInsertFromArena(const char *) override { throwMustBeDecompressed(); }
const char * skipSerializedInArena(const char *) const override { throwMustBeDecompressed(); }
void updateHashWithValue(size_t, SipHash &) const override { throwMustBeDecompressed(); }
void updateWeakHash32(WeakHash32 &) const override { throwMustBeDecompressed(); }
WeakHash32 getWeakHash32() const override { throwMustBeDecompressed(); }
void updateHashFast(SipHash &) const override { throwMustBeDecompressed(); }
ColumnPtr filter(const Filter &, ssize_t) const override { throwMustBeDecompressed(); }
void expand(const Filter &, bool) override { throwMustBeDecompressed(); }

@@ -137,18 +137,10 @@ void ColumnConst::updatePermutation(PermutationSortDirection /*direction*/, Perm
{
}

void ColumnConst::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnConst::getWeakHash32() const
{
if (hash.getData().size() != s)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));

WeakHash32 element_hash(1);
data->updateWeakHash32(element_hash);
size_t data_hash = element_hash.getData()[0];

for (auto & value : hash.getData())
value = static_cast<UInt32>(intHashCRC32(data_hash, value));
WeakHash32 element_hash = data->getWeakHash32();
return WeakHash32(s, element_hash.getData()[0]);
}

void ColumnConst::compareColumn(

@@ -204,7 +204,7 @@
data->updateHashWithValue(0, hash);
}

void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;

void updateHashFast(SipHash & hash) const override
{

@@ -28,7 +28,6 @@ namespace ErrorCodes
extern const int PARAMETER_OUT_OF_BOUND;
extern const int SIZES_OF_COLUMNS_DOESNT_MATCH;
extern const int NOT_IMPLEMENTED;
extern const int LOGICAL_ERROR;
}

template <is_decimal T>
@@ -76,13 +75,10 @@ void ColumnDecimal<T>::updateHashWithValue(size_t n, SipHash & hash) const
}

template <is_decimal T>
void ColumnDecimal<T>::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnDecimal<T>::getWeakHash32() const
{
auto s = data.size();

if (hash.getData().size() != s)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
WeakHash32 hash(s);

const T * begin = data.data();
const T * end = begin + s;
@@ -94,6 +90,8 @@ void ColumnDecimal<T>::updateWeakHash32(WeakHash32 & hash) const
++begin;
++hash_data;
}

return hash;
}

template <is_decimal T>

@@ -102,7 +102,7 @@
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char * pos) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;
void updateHashFast(SipHash & hash) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;

@@ -4,6 +4,7 @@
#include <Columns/ColumnVector.h>
#include <Columns/ColumnVariant.h>
#include <DataTypes/IDataType.h>
#include <Common/WeakHash.h>

namespace DB
@@ -174,9 +175,9 @@

void updateHashWithValue(size_t n, SipHash & hash) const override;

void updateWeakHash32(WeakHash32 & hash) const override
WeakHash32 getWeakHash32() const override
{
variant_column->updateWeakHash32(hash);
return variant_column->getWeakHash32();
}

void updateHashFast(SipHash & hash) const override

@@ -137,14 +137,10 @@ void ColumnFixedString::updateHashWithValue(size_t index, SipHash & hash) const
hash.update(reinterpret_cast<const char *>(&chars[n * index]), n);
}

void ColumnFixedString::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnFixedString::getWeakHash32() const
{
auto s = size();

if (hash.getData().size() != s)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, "
"hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
WeakHash32 hash(s);

const UInt8 * pos = chars.data();
UInt32 * hash_data = hash.getData().data();
@@ -156,6 +152,8 @@ void ColumnFixedString::updateWeakHash32(WeakHash32 & hash) const
pos += n;
++hash_data;
}

return hash;
}

void ColumnFixedString::updateHashFast(SipHash & hash) const

@@ -133,7 +133,7 @@

void updateHashWithValue(size_t index, SipHash & hash) const override;

void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;

void updateHashFast(SipHash & hash) const override;

@@ -4,6 +4,7 @@
#include <Core/NamesAndTypes.h>
#include <Core/ColumnsWithTypeAndName.h>
#include <Columns/IColumn.h>
#include <Common/WeakHash.h>

namespace DB
@@ -130,9 +131,9 @@
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "updateHashWithValue is not implemented for {}", getName());
}

void updateWeakHash32(WeakHash32 &) const override
WeakHash32 getWeakHash32() const override
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "updateWeakHash32 is not implemented for {}", getName());
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getWeakHash32 is not implemented for {}", getName());
}

void updateHashFast(SipHash &) const override

@@ -7,8 +7,7 @@
#include <Common/HashTable/HashMap.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include "Storages/IndicesDescription.h"
#include "base/types.h"
#include <base/types.h>
#include <base/sort.h>
#include <base/scope_guard.h>

@@ -320,19 +319,10 @@ const char * ColumnLowCardinality::skipSerializedInArena(const char * pos) const
return getDictionary().skipSerializedInArena(pos);
}

void ColumnLowCardinality::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnLowCardinality::getWeakHash32() const
{
auto s = size();

if (hash.getData().size() != s)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));

const auto & dict = getDictionary().getNestedColumn();
WeakHash32 dict_hash(dict->size());
dict->updateWeakHash32(dict_hash);

idx.updateWeakHash(hash, dict_hash);
WeakHash32 dict_hash = getDictionary().getNestedColumn()->getWeakHash32();
return idx.getWeakHash(dict_hash);
}

void ColumnLowCardinality::updateHashFast(SipHash & hash) const
@@ -832,10 +822,11 @@ bool ColumnLowCardinality::Index::containsDefault() const
return contains;
}

void ColumnLowCardinality::Index::updateWeakHash(WeakHash32 & hash, WeakHash32 & dict_hash) const
WeakHash32 ColumnLowCardinality::Index::getWeakHash(const WeakHash32 & dict_hash) const
{
WeakHash32 hash(positions->size());
auto & hash_data = hash.getData();
auto & dict_hash_data = dict_hash.getData();
const auto & dict_hash_data = dict_hash.getData();

auto update_weak_hash = [&](auto x)
{
@@ -844,10 +835,11 @@ void ColumnLowCardinality::Index::updateWeakHash(WeakHash32 & hash, WeakHash32 &
auto size = data.size();

for (size_t i = 0; i < size; ++i)
hash_data[i] = static_cast<UInt32>(intHashCRC32(dict_hash_data[data[i]], hash_data[i]));
hash_data[i] = dict_hash_data[data[i]];
};

callForType(std::move(update_weak_hash), size_of_type);
return hash;
}

void ColumnLowCardinality::Index::collectSerializedValueSizes(

@@ -111,7 +111,7 @@
getDictionary().updateHashWithValue(getIndexes().getUInt(n), hash);
}

void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;

void updateHashFast(SipHash &) const override;

@@ -325,7 +325,7 @@

bool containsDefault() const;

void updateWeakHash(WeakHash32 & hash, WeakHash32 & dict_hash) const;
WeakHash32 getWeakHash(const WeakHash32 & dict_hash) const;

void collectSerializedValueSizes(PaddedPODArray<UInt64> & sizes, const PaddedPODArray<UInt64> & dict_sizes) const;

@@ -143,9 +143,9 @@ void ColumnMap::updateHashWithValue(size_t n, SipHash & hash) const
nested->updateHashWithValue(n, hash);
}

void ColumnMap::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnMap::getWeakHash32() const
{
nested->updateWeakHash32(hash);
return nested->getWeakHash32();
}

void ColumnMap::updateHashFast(SipHash & hash) const

@@ -64,7 +64,7 @@
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char * pos) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;
void updateHashFast(SipHash & hash) const override;

#if !defined(ABORT_ON_LOGICAL_ERROR)

@@ -56,25 +56,21 @@ void ColumnNullable::updateHashWithValue(size_t n, SipHash & hash) const
getNestedColumn().updateHashWithValue(n, hash);
}

void ColumnNullable::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnNullable::getWeakHash32() const
{
auto s = size();

if (hash.getData().size() != s)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));

WeakHash32 old_hash = hash;
nested_column->updateWeakHash32(hash);
WeakHash32 hash = nested_column->getWeakHash32();

const auto & null_map_data = getNullMapData();
auto & hash_data = hash.getData();
auto & old_hash_data = old_hash.getData();

/// Use old data for nulls.
/// Use default for nulls.
for (size_t row = 0; row < s; ++row)
if (null_map_data[row])
hash_data[row] = old_hash_data[row];
hash_data[row] = WeakHash32::kDefaultInitialValue;

return hash;
}

void ColumnNullable::updateHashFast(SipHash & hash) const

@@ -133,7 +133,7 @@
void protect() override;
ColumnPtr replicate(const Offsets & replicate_offsets) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;
void updateHashFast(SipHash & hash) const override;
void getExtremes(Field & min, Field & max) const override;
// Special function for nullable minmax index
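A behavioural consequence of the ColumnNullable change above: with the old in/out interface a NULL row kept whatever value the caller had already accumulated in the hash, while the returned hash has no prior state, so NULL rows now get the fixed WeakHash32::kDefaultInitialValue. A rough sketch of that property (the ColumnUInt32/ColumnUInt8/ColumnNullable factory calls are assumed from the wider ClickHouse codebase and are not part of this commit):

```cpp
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnsNumber.h>
#include <Common/WeakHash.h>
#include <cassert>

using namespace DB;

void nullableWeakHashSketch()
{
    auto nested = ColumnUInt32::create();
    auto null_map = ColumnUInt8::create();
    nested->insertValue(42); null_map->insertValue(0);   /// row 0: 42
    nested->insertValue(0);  null_map->insertValue(1);   /// row 1: NULL

    auto col = ColumnNullable::create(std::move(nested), std::move(null_map));
    WeakHash32 hash = col->getWeakHash32();

    /// NULL rows no longer depend on any previously accumulated hash state.
    assert(hash.getData()[1] == WeakHash32::kDefaultInitialValue);
}
```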
@@ -5,6 +5,7 @@
#include <Core/Names.h>
#include <DataTypes/Serializations/SubcolumnsTree.h>
#include <Common/PODArray.h>
#include <Common/WeakHash.h>

#include <DataTypes/IDataType.h>

@@ -252,7 +253,7 @@
const char * deserializeAndInsertFromArena(const char *) override { throwMustBeConcrete(); }
const char * skipSerializedInArena(const char *) const override { throwMustBeConcrete(); }
void updateHashWithValue(size_t, SipHash &) const override { throwMustBeConcrete(); }
void updateWeakHash32(WeakHash32 &) const override { throwMustBeConcrete(); }
WeakHash32 getWeakHash32() const override { throwMustBeConcrete(); }
void updateHashFast(SipHash & hash) const override;
void expand(const Filter &, bool) override { throwMustBeConcrete(); }
bool hasEqualValues() const override { throwMustBeConcrete(); }

@@ -678,20 +678,22 @@ void ColumnSparse::updateHashWithValue(size_t n, SipHash & hash) const
values->updateHashWithValue(getValueIndex(n), hash);
}

void ColumnSparse::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnSparse::getWeakHash32() const
{
if (hash.getData().size() != _size)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", _size, hash.getData().size());
WeakHash32 values_hash = values->getWeakHash32();
WeakHash32 hash(size());

auto & hash_data = hash.getData();
auto & values_hash_data = values_hash.getData();

auto offset_it = begin();
auto & hash_data = hash.getData();
for (size_t i = 0; i < _size; ++i, ++offset_it)
{
size_t value_index = offset_it.getValueIndex();
auto data_ref = values->getDataAt(value_index);
hash_data[i] = ::updateWeakHash32(reinterpret_cast<const UInt8 *>(data_ref.data), data_ref.size, hash_data[i]);
hash_data[i] = values_hash_data[value_index];
}

return hash;
}

void ColumnSparse::updateHashFast(SipHash & hash) const

@@ -139,7 +139,7 @@
void protect() override;
ColumnPtr replicate(const Offsets & replicate_offsets) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;
void updateHashFast(SipHash & hash) const override;
void getExtremes(Field & min, Field & max) const override;

@@ -108,13 +108,10 @@ MutableColumnPtr ColumnString::cloneResized(size_t to_size) const
return res;
}

void ColumnString::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnString::getWeakHash32() const
{
auto s = offsets.size();

if (hash.getData().size() != s)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
WeakHash32 hash(s);

const UInt8 * pos = chars.data();
UInt32 * hash_data = hash.getData().data();
@@ -130,6 +127,8 @@ void ColumnString::updateWeakHash32(WeakHash32 & hash) const
prev_offset = offset;
++hash_data;
}

return hash;
}

@@ -212,7 +212,7 @@
hash.update(reinterpret_cast<const char *>(&chars[offset]), string_size);
}

void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;

void updateHashFast(SipHash & hash) const override
{

@@ -201,6 +201,7 @@ bool ColumnTuple::tryInsert(const Field & x)
return false;
}
}
++column_length;

return true;
}
@@ -236,6 +237,7 @@ void ColumnTuple::doInsertManyFrom(const IColumn & src, size_t position, size_t

for (size_t i = 0; i < tuple_size; ++i)
columns[i]->insertManyFrom(*src_tuple.columns[i], position, length);
column_length += length;
}

void ColumnTuple::insertDefault()
@@ -308,16 +310,15 @@ void ColumnTuple::updateHashWithValue(size_t n, SipHash & hash) const
column->updateHashWithValue(n, hash);
}

void ColumnTuple::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnTuple::getWeakHash32() const
{
auto s = size();

if (hash.getData().size() != s)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
WeakHash32 hash(s);

for (const auto & column : columns)
column->updateWeakHash32(hash);
hash.update(column->getWeakHash32());

return hash;
}

void ColumnTuple::updateHashFast(SipHash & hash) const

@@ -81,7 +81,7 @@
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char * pos) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;
void updateHashFast(SipHash & hash) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;

@@ -789,36 +789,26 @@ void ColumnVariant::updateHashWithValue(size_t n, SipHash & hash) const
variants[localDiscriminatorByGlobal(global_discr)]->updateHashWithValue(offsetAt(n), hash);
}

void ColumnVariant::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnVariant::getWeakHash32() const
{
auto s = size();

if (hash.getData().size() != s)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));

/// If we have only NULLs, keep hash unchanged.
if (hasOnlyNulls())
return;
return WeakHash32(s);

/// Optimization for case when there is only 1 non-empty variant and no NULLs.
/// In this case we can just calculate weak hash for this variant.
if (auto non_empty_local_discr = getLocalDiscriminatorOfOneNoneEmptyVariantNoNulls())
{
variants[*non_empty_local_discr]->updateWeakHash32(hash);
return;
}
return variants[*non_empty_local_discr]->getWeakHash32();

/// Calculate weak hash for all variants.
std::vector<WeakHash32> nested_hashes;
for (const auto & variant : variants)
{
WeakHash32 nested_hash(variant->size());
variant->updateWeakHash32(nested_hash);
nested_hashes.emplace_back(std::move(nested_hash));
}
nested_hashes.emplace_back(variant->getWeakHash32());

/// For each row hash is a hash of corresponding row from corresponding variant.
WeakHash32 hash(s);
auto & hash_data = hash.getData();
const auto & local_discriminators_data = getLocalDiscriminators();
const auto & offsets_data = getOffsets();
@@ -827,11 +817,10 @@ void ColumnVariant::updateWeakHash32(WeakHash32 & hash) const
Discriminator discr = local_discriminators_data[i];
/// Update hash only for non-NULL values
if (discr != NULL_DISCRIMINATOR)
{
auto nested_hash = nested_hashes[local_discriminators_data[i]].getData()[offsets_data[i]];
hash_data[i] = static_cast<UInt32>(hashCRC32(nested_hash, hash_data[i]));
}
hash_data[i] = nested_hashes[discr].getData()[offsets_data[i]];
}

return hash;
}

void ColumnVariant::updateHashFast(SipHash & hash) const

@@ -213,7 +213,7 @@
const char * deserializeVariantAndInsertFromArena(Discriminator global_discr, const char * pos);
const char * skipSerializedInArena(const char * pos) const override;
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;
void updateHashFast(SipHash & hash) const override;
ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;
void expand(const Filter & mask, bool inverted) override;

@@ -73,13 +73,10 @@ void ColumnVector<T>::updateHashWithValue(size_t n, SipHash & hash) const
}

template <typename T>
void ColumnVector<T>::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnVector<T>::getWeakHash32() const
{
auto s = data.size();

if (hash.getData().size() != s)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
"column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
WeakHash32 hash(s);

const T * begin = data.data();
const T * end = begin + s;
@@ -91,6 +88,8 @@ void ColumnVector<T>::updateWeakHash32(WeakHash32 & hash) const
++begin;
++hash_data;
}

return hash;
}

template <typename T>

@@ -114,7 +114,7 @@

void updateHashWithValue(size_t n, SipHash & hash) const override;

void updateWeakHash32(WeakHash32 & hash) const override;
WeakHash32 getWeakHash32() const override;

void updateHashFast(SipHash & hash) const override;
@@ -300,10 +300,10 @@
/// passed bytes to hash must identify sequence of values unambiguously.
virtual void updateHashWithValue(size_t n, SipHash & hash) const = 0;

/// Update hash function value. Hash is calculated for each element.
/// Get hash function value. Hash is calculated for each element.
/// It's a fast weak hash function. Mainly need to scatter data between threads.
/// WeakHash32 must have the same size as column.
virtual void updateWeakHash32(WeakHash32 & hash) const = 0;
virtual WeakHash32 getWeakHash32() const = 0;

/// Update state of hash with all column.
virtual void updateHashFast(SipHash & hash) const = 0;
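The IColumn comment above says the weak hash is mainly used to scatter data between threads. A minimal sketch of that use case (not from this commit; it relies only on the IColumn::getWeakHash32 and WeakHash32::getData interfaces that appear in these hunks):

```cpp
#include <Columns/IColumn.h>
#include <Common/WeakHash.h>
#include <vector>

using namespace DB;

/// Assign every row of a column to one of `num_threads` buckets using the
/// per-row 32-bit weak hash, so equal values land in the same bucket.
std::vector<size_t> chooseBuckets(const IColumn & column, size_t num_threads)
{
    WeakHash32 hash = column.getWeakHash32();   /// one UInt32 per row
    const auto & hash_data = hash.getData();

    std::vector<size_t> bucket_of_row(hash_data.size());
    for (size_t row = 0; row < hash_data.size(); ++row)
        bucket_of_row[row] = hash_data[row] % num_threads;
    return bucket_of_row;
}
```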
@@ -1,6 +1,7 @@
#pragma once

#include <Columns/IColumn.h>
#include <Common/WeakHash.h>

namespace DB
@@ -63,8 +64,9 @@
{
}

void updateWeakHash32(WeakHash32 & /*hash*/) const override
WeakHash32 getWeakHash32() const override
{
return WeakHash32(s);
}

void updateHashFast(SipHash & /*hash*/) const override

@@ -1,6 +1,7 @@
#pragma once
#include <optional>
#include <Columns/IColumn.h>
#include <Common/WeakHash.h>

namespace DB
{
@@ -166,9 +167,9 @@
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method scatter is not supported for ColumnUnique.");
}

void updateWeakHash32(WeakHash32 &) const override
WeakHash32 getWeakHash32() const override
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method updateWeakHash32 is not supported for ColumnUnique.");
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getWeakHash32 is not supported for ColumnUnique.");
}

void updateHashFast(SipHash &) const override

@@ -60,8 +60,7 @@ TEST(WeakHash32, ColumnVectorU8)
data.push_back(i);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -77,8 +76,7 @@ TEST(WeakHash32, ColumnVectorI8)
data.push_back(i);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -94,8 +92,7 @@ TEST(WeakHash32, ColumnVectorU16)
data.push_back(i);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -111,8 +108,7 @@ TEST(WeakHash32, ColumnVectorI16)
data.push_back(i);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -128,8 +124,7 @@ TEST(WeakHash32, ColumnVectorU32)
data.push_back(i << 16u);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -145,8 +140,7 @@ TEST(WeakHash32, ColumnVectorI32)
data.push_back(i << 16);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -162,8 +156,7 @@ TEST(WeakHash32, ColumnVectorU64)
data.push_back(i << 32u);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -179,8 +172,7 @@ TEST(WeakHash32, ColumnVectorI64)
data.push_back(i << 32);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -204,8 +196,7 @@ TEST(WeakHash32, ColumnVectorU128)
}
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), eq_data);
}
@@ -221,8 +212,7 @@ TEST(WeakHash32, ColumnVectorI128)
data.push_back(i << 32);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -238,8 +228,7 @@ TEST(WeakHash32, ColumnDecimal32)
data.push_back(i << 16);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -255,8 +244,7 @@ TEST(WeakHash32, ColumnDecimal64)
data.push_back(i << 32);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -272,8 +260,7 @@ TEST(WeakHash32, ColumnDecimal128)
data.push_back(i << 32);
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), col->getData());
}
@@ -294,8 +281,7 @@ TEST(WeakHash32, ColumnString1)
}
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), data);
}
@@ -331,8 +317,7 @@ TEST(WeakHash32, ColumnString2)
}
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), data);
}
@@ -369,8 +354,7 @@ TEST(WeakHash32, ColumnString3)
}
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), data);
}
@@ -397,8 +381,7 @@ TEST(WeakHash32, ColumnFixedString)
}
}

WeakHash32 hash(col->size());
col->updateWeakHash32(hash);
WeakHash32 hash = col->getWeakHash32();

checkColumn(hash.getData(), data);
}
@@ -444,8 +427,7 @@ TEST(WeakHash32, ColumnArray)

auto col_arr = ColumnArray::create(std::move(val), std::move(off));

WeakHash32 hash(col_arr->size());
col_arr->updateWeakHash32(hash);
WeakHash32 hash = col_arr->getWeakHash32();

checkColumn(hash.getData(), eq_data);
}
@@ -479,8 +461,7 @@ TEST(WeakHash32, ColumnArray2)

auto col_arr = ColumnArray::create(std::move(val), std::move(off));

WeakHash32 hash(col_arr->size());
col_arr->updateWeakHash32(hash);
WeakHash32 hash = col_arr->getWeakHash32();

checkColumn(hash.getData(), eq_data);
}
@@ -536,8 +517,7 @@ TEST(WeakHash32, ColumnArrayArray)
auto col_arr = ColumnArray::create(std::move(val), std::move(off));
auto col_arr_arr = ColumnArray::create(std::move(col_arr), std::move(off2));

WeakHash32 hash(col_arr_arr->size());
col_arr_arr->updateWeakHash32(hash);
WeakHash32 hash = col_arr_arr->getWeakHash32();

checkColumn(hash.getData(), eq_data);
}
@ -555,8 +535,7 @@ TEST(WeakHash32, ColumnConst)
|
||||
|
||||
auto col_const = ColumnConst::create(std::move(inner_col), 256);
|
||||
|
||||
WeakHash32 hash(col_const->size());
|
||||
col_const->updateWeakHash32(hash);
|
||||
WeakHash32 hash = col_const->getWeakHash32();
|
||||
|
||||
checkColumn(hash.getData(), data);
|
||||
}
|
||||
@ -576,8 +555,7 @@ TEST(WeakHash32, ColumnLowcardinality)
|
||||
}
|
||||
}
|
||||
|
||||
WeakHash32 hash(col->size());
|
||||
col->updateWeakHash32(hash);
|
||||
WeakHash32 hash = col->getWeakHash32();
|
||||
|
||||
checkColumn(hash.getData(), data);
|
||||
}
|
||||
@ -602,8 +580,7 @@ TEST(WeakHash32, ColumnNullable)
|
||||
|
||||
auto col_null = ColumnNullable::create(std::move(col), std::move(mask));
|
||||
|
||||
WeakHash32 hash(col_null->size());
|
||||
col_null->updateWeakHash32(hash);
|
||||
WeakHash32 hash = col_null->getWeakHash32();
|
||||
|
||||
checkColumn(hash.getData(), eq);
|
||||
}
|
||||
@ -633,8 +610,7 @@ TEST(WeakHash32, ColumnTupleUInt64UInt64)
|
||||
columns.emplace_back(std::move(col2));
|
||||
auto col_tuple = ColumnTuple::create(std::move(columns));
|
||||
|
||||
WeakHash32 hash(col_tuple->size());
|
||||
col_tuple->updateWeakHash32(hash);
|
||||
WeakHash32 hash = col_tuple->getWeakHash32();
|
||||
|
||||
checkColumn(hash.getData(), eq);
|
||||
}
|
||||
@ -671,8 +647,7 @@ TEST(WeakHash32, ColumnTupleUInt64String)
|
||||
columns.emplace_back(std::move(col2));
|
||||
auto col_tuple = ColumnTuple::create(std::move(columns));
|
||||
|
||||
WeakHash32 hash(col_tuple->size());
|
||||
col_tuple->updateWeakHash32(hash);
|
||||
WeakHash32 hash = col_tuple->getWeakHash32();
|
||||
|
||||
checkColumn(hash.getData(), eq);
|
||||
}
|
||||
@ -709,8 +684,7 @@ TEST(WeakHash32, ColumnTupleUInt64FixedString)
|
||||
columns.emplace_back(std::move(col2));
|
||||
auto col_tuple = ColumnTuple::create(std::move(columns));
|
||||
|
||||
WeakHash32 hash(col_tuple->size());
|
||||
col_tuple->updateWeakHash32(hash);
|
||||
WeakHash32 hash = col_tuple->getWeakHash32();
|
||||
|
||||
checkColumn(hash.getData(), eq);
|
||||
}
|
||||
@ -756,8 +730,7 @@ TEST(WeakHash32, ColumnTupleUInt64Array)
|
||||
columns.emplace_back(ColumnArray::create(std::move(val), std::move(off)));
|
||||
auto col_tuple = ColumnTuple::create(std::move(columns));
|
||||
|
||||
WeakHash32 hash(col_tuple->size());
|
||||
col_tuple->updateWeakHash32(hash);
|
||||
WeakHash32 hash = col_tuple->getWeakHash32();
|
||||
|
||||
checkColumn(hash.getData(), eq_data);
|
||||
}
|
||||
|
@@ -68,7 +68,7 @@ void * allocNoTrack(size_t size, size_t alignment)
{
void * buf;
#if USE_GWP_ASAN
if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
if (unlikely(GWPAsan::shouldSample()))
{
if (void * ptr = GWPAsan::GuardedAlloc.allocate(size, alignment))
{
@@ -185,7 +185,7 @@ void * Allocator<clear_memory_, populate>::realloc(void * buf, size_t old_size,
}

#if USE_GWP_ASAN
if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
if (unlikely(GWPAsan::shouldSample()))
{
auto trace_alloc = CurrentMemoryTracker::alloc(new_size);
if (void * ptr = GWPAsan::GuardedAlloc.allocate(new_size, alignment))
@@ -244,6 +244,15 @@ private:
const char * className() const noexcept override { return "DB::ErrnoException"; }
};

/// An exception to use in unit tests to test interfaces.
/// It is distinguished from others, so it does not have to be logged.
class TestException : public Exception
{
public:
using Exception::Exception;
};


using Exceptions = std::vector<std::exception_ptr>;

/** Try to write an exception to the log (and forget about it).
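A brief usage sketch of TestException, assuming a test scenario like the gtest changes later in this commit (`simulate_failure` is illustrative):

    try
    {
        if (simulate_failure)
            throw TestException(); /// default-constructed, as in the unit tests below
    }
    catch (const TestException &) // NOLINT
    {
        /// Exception from a unit test, ignore it (no logging needed).
    }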
@@ -217,6 +217,13 @@ void printReport([[maybe_unused]] uintptr_t fault_address)
reinterpret_cast<void **>(trace.data()), 0, trace_length, [&](const auto line) { LOG_FATAL(logger, fmt::runtime(line)); });
}

std::atomic<bool> init_finished = false;

void initFinished()
{
init_finished.store(true, std::memory_order_relaxed);
}

std::atomic<double> force_sample_probability = 0.0;

void setForceSampleProbability(double value)
@@ -19,12 +19,30 @@ bool isGWPAsanError(uintptr_t fault_address);

void printReport(uintptr_t fault_address);

extern std::atomic<bool> init_finished;

void initFinished();

extern std::atomic<double> force_sample_probability;

void setForceSampleProbability(double value);

/**
* We'd like to postpone sampling allocations until the startup is finished. There are mainly
* two reasons for that:
*
* - To avoid complex issues with initialization order
* - Don't waste MaxSimultaneousAllocations on global objects as it's not useful
*/
inline bool shouldSample()
{
return init_finished.load(std::memory_order_relaxed) && GuardedAlloc.shouldSample();
}

inline bool shouldForceSample()
{
if (!init_finished.load(std::memory_order_relaxed))
return false;
std::bernoulli_distribution dist(force_sample_probability.load(std::memory_order_relaxed));
return dist(thread_local_rng);
}
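A condensed sketch of how the allocation paths changed in this commit use the new helpers (`size` and `alignment` are assumed; the place where initFinished() gets called is not shown in this diff):

    #if USE_GWP_ASAN
    if (unlikely(GWPAsan::shouldSample())) /// stays false until init_finished is set
    {
        if (void * ptr = GWPAsan::GuardedAlloc.allocate(size, alignment))
            return ptr;
    }
    #endif

    /// Assumed to run once at the end of server startup:
    GWPAsan::initFinished();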
@@ -23,8 +23,20 @@ namespace DB

LazyPipeFDs TraceSender::pipe;

static thread_local bool inside_send = false;
void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Extras extras)
{
/** The method shouldn't be called recursively or throw exceptions.
* There are several reasons:
* - avoid infinite recursion when some of subsequent functions invoke tracing;
* - avoid inconsistent writes if the method was interrupted by a signal handler in the middle of writing,
* and then another tracing is invoked (e.g., from query profiler).
*/
if (unlikely(inside_send))
return;
inside_send = true;
DENY_ALLOCATIONS_IN_SCOPE;

constexpr size_t buf_size = sizeof(char) /// TraceCollector stop flag
+ sizeof(UInt8) /// String size
+ QUERY_ID_MAX_LEN /// Maximum query_id length
@@ -80,6 +92,8 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Ext
writePODBinary(extras.increment, out);

out.next();

inside_send = false;
}

}
@@ -1,2 +1,24 @@
#include <Common/WeakHash.h>
#include <Common/Exception.h>
#include <Common/HashTable/Hash.h>

namespace DB
{

namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}

void WeakHash32::update(const WeakHash32 & other)
{
size_t size = data.size();
if (size != other.data.size())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match:"
"left size is {}, right size is {}", size, other.data.size());

for (size_t i = 0; i < size; ++i)
data[i] = static_cast<UInt32>(intHashCRC32(other.data[i], data[i]));
}

}
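A short usage sketch of the new combining method, assuming two key columns with `rows` rows each: update() folds the other hash into the existing slots with intHashCRC32, so each call mixes one more column into every row.

    WeakHash32 hash(rows);                        /// every slot starts at ~UInt32(0)
    hash.update(key_column_a->getWeakHash32());
    hash.update(key_column_b->getWeakHash32());
    /// hash.getData()[i] now depends on row i of both key columns.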
@@ -11,9 +11,8 @@ namespace DB
/// The main purpose why this class needed is to support data initialization. Initially, every bit is 1.
class WeakHash32
{
static constexpr UInt32 kDefaultInitialValue = ~UInt32(0);

public:
static constexpr UInt32 kDefaultInitialValue = ~UInt32(0);

using Container = PaddedPODArray<UInt32>;

@@ -22,6 +21,8 @@ public:

void reset(size_t size, UInt32 initial_value = kDefaultInitialValue) { data.assign(size, initial_value); }

void update(const WeakHash32 & other);

const Container & getData() const { return data; }
Container & getData() { return data; }
@@ -37,7 +37,7 @@ requires DB::OptionalArgument<TAlign...>
inline ALWAYS_INLINE void * newImpl(std::size_t size, TAlign... align)
{
#if USE_GWP_ASAN
if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
if (unlikely(GWPAsan::shouldSample()))
{
if constexpr (sizeof...(TAlign) == 1)
{
@@ -83,7 +83,7 @@ inline ALWAYS_INLINE void * newImpl(std::size_t size, TAlign... align)
inline ALWAYS_INLINE void * newNoExcept(std::size_t size) noexcept
{
#if USE_GWP_ASAN
if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
if (unlikely(GWPAsan::shouldSample()))
{
if (void * ptr = GWPAsan::GuardedAlloc.allocate(size))
{
@@ -102,7 +102,7 @@ inline ALWAYS_INLINE void * newNoExcept(std::size_t size) noexcept
inline ALWAYS_INLINE void * newNoExcept(std::size_t size, std::align_val_t align) noexcept
{
#if USE_GWP_ASAN
if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
if (unlikely(GWPAsan::shouldSample()))
{
if (void * ptr = GWPAsan::GuardedAlloc.allocate(size, alignToSizeT(align)))
{
@@ -54,16 +54,3 @@ TEST(ShellCommand, ExecuteWithInput)

EXPECT_EQ(res, "Hello, world!\n");
}

TEST(ShellCommand, AutoWait)
{
// <defunct> hunting:
for (int i = 0; i < 1000; ++i)
{
auto command = ShellCommand::execute("echo " + std::to_string(i));
//command->wait(); // now automatic
}

// std::cerr << "inspect me: ps auxwwf\n";
// std::this_thread::sleep_for(std::chrono::seconds(100));
}
@ -47,32 +47,63 @@ bool allArgumentsAreConstants(const ColumnsWithTypeAndName & args)
|
||||
return true;
|
||||
}
|
||||
|
||||
/// Replaces single low cardinality column in a function call by its dictionary
|
||||
/// This can only happen after the arguments have been adapted in IFunctionOverloadResolver::getReturnType
|
||||
/// as it's only possible if there is one low cardinality column and, optionally, const columns
|
||||
ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
|
||||
ColumnsWithTypeAndName & args, bool can_be_executed_on_default_arguments, size_t input_rows_count)
|
||||
{
|
||||
size_t num_rows = input_rows_count;
|
||||
/// We return the LC indexes so the LC can be reconstructed with the function result
|
||||
ColumnPtr indexes;
|
||||
|
||||
/// Find first LowCardinality column and replace it to nested dictionary.
|
||||
for (auto & column : args)
|
||||
{
|
||||
if (const auto * low_cardinality_column = checkAndGetColumn<ColumnLowCardinality>(column.column.get()))
|
||||
{
|
||||
/// Single LowCardinality column is supported now.
|
||||
if (indexes)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single dictionary argument for function.");
|
||||
size_t number_low_cardinality_columns = 0;
|
||||
size_t last_low_cardinality = 0;
|
||||
size_t number_const_columns = 0;
|
||||
size_t number_full_columns = 0;
|
||||
|
||||
const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(column.type.get());
|
||||
for (size_t i = 0; i < args.size(); i++)
|
||||
{
|
||||
auto const & arg = args[i];
|
||||
if (checkAndGetColumn<ColumnLowCardinality>(arg.column.get()))
|
||||
{
|
||||
number_low_cardinality_columns++;
|
||||
last_low_cardinality = i;
|
||||
}
|
||||
else if (checkAndGetColumn<ColumnConst>(arg.column.get()))
|
||||
number_const_columns++;
|
||||
else
|
||||
number_full_columns++;
|
||||
}
|
||||
|
||||
if (!number_low_cardinality_columns && !number_const_columns)
|
||||
return nullptr;
|
||||
|
||||
if (number_full_columns > 0 || number_low_cardinality_columns > 1)
|
||||
{
|
||||
/// This should not be possible but currently there are multiple tests in CI failing because of it
|
||||
/// TODO: Fix those cases, then enable this exception
|
||||
#if 0
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected low cardinality types found. Low cardinality: {}. Full {}. Const {}",
|
||||
number_low_cardinality_columns, number_full_columns, number_const_columns);
|
||||
#else
|
||||
return nullptr;
|
||||
#endif
|
||||
}
|
||||
else if (number_low_cardinality_columns == 1)
|
||||
{
|
||||
auto & lc_arg = args[last_low_cardinality];
|
||||
|
||||
const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(lc_arg.type.get());
|
||||
if (!low_cardinality_type)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Incompatible type for LowCardinality column: {}",
|
||||
column.type->getName());
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Incompatible type for LowCardinality column: {}", lc_arg.type->getName());
|
||||
|
||||
const auto * low_cardinality_column = checkAndGetColumn<ColumnLowCardinality>(lc_arg.column.get());
|
||||
chassert(low_cardinality_column);
|
||||
|
||||
if (can_be_executed_on_default_arguments)
|
||||
{
|
||||
/// Normal case, when function can be executed on values' default.
|
||||
column.column = low_cardinality_column->getDictionary().getNestedColumn();
|
||||
lc_arg.column = low_cardinality_column->getDictionary().getNestedColumn();
|
||||
indexes = low_cardinality_column->getIndexesPtr();
|
||||
}
|
||||
else
|
||||
@ -80,21 +111,21 @@ ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
|
||||
/// Special case when default value can't be used. Example: 1 % LowCardinality(Int).
|
||||
/// LowCardinality always contains default, so 1 % 0 will throw exception in normal case.
|
||||
auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size());
|
||||
column.column = dict_encoded.dictionary;
|
||||
lc_arg.column = dict_encoded.dictionary;
|
||||
indexes = dict_encoded.indexes;
|
||||
}
|
||||
|
||||
num_rows = column.column->size();
|
||||
column.type = low_cardinality_type->getDictionaryType();
|
||||
}
|
||||
/// The new column will have a different number of rows, normally less but occasionally it might be more (NULL)
|
||||
input_rows_count = lc_arg.column->size();
|
||||
lc_arg.type = low_cardinality_type->getDictionaryType();
|
||||
}
|
||||
|
||||
/// Change size of constants.
|
||||
/// Change size of constants
|
||||
for (auto & column : args)
|
||||
{
|
||||
if (const auto * column_const = checkAndGetColumn<ColumnConst>(column.column.get()))
|
||||
{
|
||||
column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), num_rows);
|
||||
column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), input_rows_count);
|
||||
column.type = recursiveRemoveLowCardinality(column.type);
|
||||
}
|
||||
}
|
||||
@ -270,6 +301,8 @@ ColumnPtr IExecutableFunction::executeWithoutSparseColumns(const ColumnsWithType
|
||||
bool can_be_executed_on_default_arguments = canBeExecutedOnDefaultArguments();
|
||||
|
||||
const auto & dictionary_type = res_low_cardinality_type->getDictionaryType();
|
||||
/// The arguments should have been adapted in IFunctionOverloadResolver::getReturnType
|
||||
/// So there is only one low cardinality column (and optionally some const columns) and no full column
|
||||
ColumnPtr indexes = replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
|
||||
columns_without_low_cardinality, can_be_executed_on_default_arguments, input_rows_count);
|
||||
|
||||
|
@ -42,6 +42,10 @@ public:
|
||||
|
||||
bool useDefaultImplementationForNulls() const override { return false; }
|
||||
|
||||
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
|
||||
|
||||
bool useDefaultImplementationForSparseColumns() const override { return false; }
|
||||
|
||||
bool isSuitableForConstantFolding() const override { return false; }
|
||||
|
||||
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
|
||||
|
@@ -310,7 +310,7 @@ IColumn::Selector ConcurrentHashJoin::selectDispatchBlock(const Strings & key_co
{
const auto & key_col = from_block.getByName(key_name).column->convertToFullColumnIfConst();
const auto & key_col_no_lc = recursiveRemoveLowCardinality(recursiveRemoveSparse(key_col));
key_col_no_lc->updateWeakHash32(hash);
hash.update(key_col_no_lc->getWeakHash32());
}
return hashToSelector(hash, num_shards);
}
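hashToSelector(hash, num_shards) then folds the combined per-row hash into a shard selector; its implementation is outside this diff, but conceptually it behaves like this sketch (the real reduction may differ):

    IColumn::Selector selector(hash.getData().size());
    for (size_t row = 0; row < selector.size(); ++row)
        selector[row] = hash.getData()[row] % num_shards; /// assumption: actual mapping may not be a plain modulo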
@@ -121,9 +121,18 @@ String InterpreterShowTablesQuery::getRewrittenQuery()
if (query.merges)
{
WriteBufferFromOwnString rewritten_query;
rewritten_query << "SELECT table, database, round((elapsed * (1 / merges.progress)) - merges.elapsed, 2) AS estimate_complete, round(elapsed,2) elapsed, "
"round(progress*100, 2) AS progress, is_mutation, formatReadableSize(total_size_bytes_compressed) AS size_compressed, "
"formatReadableSize(memory_usage) AS memory_usage FROM system.merges";
rewritten_query << R"(
SELECT
table,
database,
merges.progress > 0 ? round(merges.elapsed * (1 - merges.progress) / merges.progress, 2) : NULL AS estimate_complete,
round(elapsed, 2) AS elapsed,
round(progress * 100, 2) AS progress,
is_mutation,
formatReadableSize(total_size_bytes_compressed) AS size_compressed,
formatReadableSize(memory_usage) AS memory_usage
FROM system.merges
)";

if (!query.like.empty())
{
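For example, with merges.elapsed = 120 s and merges.progress = 0.4, the rewritten expression yields estimate_complete = round(120 * (1 - 0.4) / 0.4, 2) = 180 s, and it returns NULL instead of a spurious estimate while progress is still 0.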
@@ -554,7 +554,7 @@ static Blocks scatterBlockByHashImpl(const Strings & key_columns_names, const Bl
for (const auto & key_name : key_columns_names)
{
ColumnPtr key_col = materializeColumn(block, key_name);
key_col->updateWeakHash32(hash);
hash.update(key_col->getWeakHash32());
}
auto selector = hashToSelector(hash, sharder);
@ -7,7 +7,6 @@
|
||||
#include <Common/FieldVisitorToString.h>
|
||||
#include <Common/KnownObjectNames.h>
|
||||
#include <Common/SipHash.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <IO/Operators.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
@ -19,9 +18,6 @@
|
||||
#include <Parsers/queryToString.h>
|
||||
#include <Parsers/ASTSetQuery.h>
|
||||
#include <Parsers/FunctionSecretArgumentsFinderAST.h>
|
||||
#include <Core/QualifiedTableName.h>
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
|
||||
|
||||
using namespace std::literals;
|
||||
@ -632,6 +628,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
|
||||
settings.ostr << ", ";
|
||||
if (arguments->children[i]->as<ASTSetQuery>())
|
||||
settings.ostr << "SETTINGS ";
|
||||
nested_dont_need_parens.list_element_index = i;
|
||||
arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens);
|
||||
}
|
||||
settings.ostr << (settings.hilite ? hilite_operator : "") << ']' << (settings.hilite ? hilite_none : "");
|
||||
@ -642,12 +639,14 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_operator : "") << ((frame.need_parens && !alias.empty()) ? "tuple" : "") << '('
|
||||
<< (settings.hilite ? hilite_none : "");
|
||||
|
||||
for (size_t i = 0; i < arguments->children.size(); ++i)
|
||||
{
|
||||
if (i != 0)
|
||||
settings.ostr << ", ";
|
||||
if (arguments->children[i]->as<ASTSetQuery>())
|
||||
settings.ostr << "SETTINGS ";
|
||||
nested_dont_need_parens.list_element_index = i;
|
||||
arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens);
|
||||
}
|
||||
settings.ostr << (settings.hilite ? hilite_operator : "") << ')' << (settings.hilite ? hilite_none : "");
|
||||
@ -663,6 +662,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
|
||||
settings.ostr << ", ";
|
||||
if (arguments->children[i]->as<ASTSetQuery>())
|
||||
settings.ostr << "SETTINGS ";
|
||||
nested_dont_need_parens.list_element_index = i;
|
||||
arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens);
|
||||
}
|
||||
settings.ostr << (settings.hilite ? hilite_operator : "") << ')' << (settings.hilite ? hilite_none : "");
|
||||
|
@@ -745,7 +745,12 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan,
{
auto & interpolate_node_typed = interpolate_node->as<InterpolateNode &>();

PlannerActionsVisitor planner_actions_visitor(planner_context);
PlannerActionsVisitor planner_actions_visitor(
planner_context,
/* use_column_identifier_as_action_node_name_, (default value)*/ true,
/// Prefer the INPUT to CONSTANT nodes (actions must be non constant)
/* always_use_const_column_for_constant_nodes */ false);

auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(*interpolate_actions_dag,
interpolate_node_typed.getExpression());
if (expression_to_interpolate_expression_nodes.size() != 1)
@ -487,16 +487,33 @@ public:
|
||||
return node;
|
||||
}
|
||||
|
||||
const ActionsDAG::Node * addConstantIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column)
|
||||
[[nodiscard]] String addConstantIfNecessary(
|
||||
const std::string & node_name, const ColumnWithTypeAndName & column, bool always_use_const_column_for_constant_nodes)
|
||||
{
|
||||
chassert(column.column != nullptr);
|
||||
auto it = node_name_to_node.find(node_name);
|
||||
if (it != node_name_to_node.end() && (!always_use_const_column_for_constant_nodes || it->second->column))
|
||||
return {node_name};
|
||||
|
||||
if (it != node_name_to_node.end())
|
||||
return it->second;
|
||||
{
|
||||
/// There is a node with this name, but it doesn't have a column
|
||||
/// This likely happens because we executed the query until WithMergeableState with a const node in the
|
||||
/// WHERE clause and, as the results of headers are materialized, the column was removed
|
||||
/// Let's add a new column and keep this
|
||||
String dupped_name{node_name + "_dupped"};
|
||||
if (node_name_to_node.find(dupped_name) != node_name_to_node.end())
|
||||
return dupped_name;
|
||||
|
||||
const auto * node = &actions_dag.addColumn(column);
|
||||
node_name_to_node[dupped_name] = node;
|
||||
return dupped_name;
|
||||
}
|
||||
|
||||
const auto * node = &actions_dag.addColumn(column);
|
||||
node_name_to_node[node->result_name] = node;
|
||||
|
||||
return node;
|
||||
return {node_name};
|
||||
}
|
||||
|
||||
template <typename FunctionOrOverloadResolver>
|
||||
@ -525,7 +542,7 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
std::unordered_map<std::string_view, const ActionsDAG::Node *> node_name_to_node;
|
||||
std::unordered_map<String, const ActionsDAG::Node *> node_name_to_node;
|
||||
ActionsDAG & actions_dag;
|
||||
QueryTreeNodePtr scope_node;
|
||||
};
|
||||
@ -533,9 +550,11 @@ private:
|
||||
class PlannerActionsVisitorImpl
|
||||
{
|
||||
public:
|
||||
PlannerActionsVisitorImpl(ActionsDAG & actions_dag,
|
||||
PlannerActionsVisitorImpl(
|
||||
ActionsDAG & actions_dag,
|
||||
const PlannerContextPtr & planner_context_,
|
||||
bool use_column_identifier_as_action_node_name_);
|
||||
bool use_column_identifier_as_action_node_name_,
|
||||
bool always_use_const_column_for_constant_nodes_);
|
||||
|
||||
ActionsDAG::NodeRawConstPtrs visit(QueryTreeNodePtr expression_node);
|
||||
|
||||
@ -595,14 +614,18 @@ private:
|
||||
const PlannerContextPtr planner_context;
|
||||
ActionNodeNameHelper action_node_name_helper;
|
||||
bool use_column_identifier_as_action_node_name;
|
||||
bool always_use_const_column_for_constant_nodes;
|
||||
};
|
||||
|
||||
PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(ActionsDAG & actions_dag,
|
||||
PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(
|
||||
ActionsDAG & actions_dag,
|
||||
const PlannerContextPtr & planner_context_,
|
||||
bool use_column_identifier_as_action_node_name_)
|
||||
bool use_column_identifier_as_action_node_name_,
|
||||
bool always_use_const_column_for_constant_nodes_)
|
||||
: planner_context(planner_context_)
|
||||
, action_node_name_helper(node_to_node_name, *planner_context, use_column_identifier_as_action_node_name_)
|
||||
, use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_)
|
||||
, always_use_const_column_for_constant_nodes(always_use_const_column_for_constant_nodes_)
|
||||
{
|
||||
actions_stack.emplace_back(actions_dag, nullptr);
|
||||
}
|
||||
@ -725,17 +748,16 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi
|
||||
column.type = constant_type;
|
||||
column.column = column.type->createColumnConst(1, constant_literal);
|
||||
|
||||
actions_stack[0].addConstantIfNecessary(constant_node_name, column);
|
||||
String final_name = actions_stack[0].addConstantIfNecessary(constant_node_name, column, always_use_const_column_for_constant_nodes);
|
||||
|
||||
size_t actions_stack_size = actions_stack.size();
|
||||
for (size_t i = 1; i < actions_stack_size; ++i)
|
||||
{
|
||||
auto & actions_stack_node = actions_stack[i];
|
||||
actions_stack_node.addInputConstantColumnIfNecessary(constant_node_name, column);
|
||||
actions_stack_node.addInputConstantColumnIfNecessary(final_name, column);
|
||||
}
|
||||
|
||||
return {constant_node_name, Levels(0)};
|
||||
|
||||
return {final_name, Levels(0)};
|
||||
}
|
||||
|
||||
PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitLambda(const QueryTreeNodePtr & node)
|
||||
@ -864,16 +886,16 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::ma
|
||||
else
|
||||
column.column = std::move(column_set);
|
||||
|
||||
actions_stack[0].addConstantIfNecessary(column.name, column);
|
||||
String final_name = actions_stack[0].addConstantIfNecessary(column.name, column, always_use_const_column_for_constant_nodes);
|
||||
|
||||
size_t actions_stack_size = actions_stack.size();
|
||||
for (size_t i = 1; i < actions_stack_size; ++i)
|
||||
{
|
||||
auto & actions_stack_node = actions_stack[i];
|
||||
actions_stack_node.addInputConstantColumnIfNecessary(column.name, column);
|
||||
actions_stack_node.addInputConstantColumnIfNecessary(final_name, column);
|
||||
}
|
||||
|
||||
return {column.name, Levels(0)};
|
||||
return {final_name, Levels(0)};
|
||||
}
|
||||
|
||||
PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitIndexHintFunction(const QueryTreeNodePtr & node)
|
||||
@ -1010,14 +1032,19 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi
|
||||
|
||||
}
|
||||
|
||||
PlannerActionsVisitor::PlannerActionsVisitor(const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_)
|
||||
PlannerActionsVisitor::PlannerActionsVisitor(
|
||||
const PlannerContextPtr & planner_context_,
|
||||
bool use_column_identifier_as_action_node_name_,
|
||||
bool always_use_const_column_for_constant_nodes_)
|
||||
: planner_context(planner_context_)
|
||||
, use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_)
|
||||
, always_use_const_column_for_constant_nodes(always_use_const_column_for_constant_nodes_)
|
||||
{}
|
||||
|
||||
ActionsDAG::NodeRawConstPtrs PlannerActionsVisitor::visit(ActionsDAG & actions_dag, QueryTreeNodePtr expression_node)
|
||||
{
|
||||
PlannerActionsVisitorImpl actions_visitor_impl(actions_dag, planner_context, use_column_identifier_as_action_node_name);
|
||||
PlannerActionsVisitorImpl actions_visitor_impl(
|
||||
actions_dag, planner_context, use_column_identifier_as_action_node_name, always_use_const_column_for_constant_nodes);
|
||||
return actions_visitor_impl.visit(expression_node);
|
||||
}
|
||||
|
||||
|
@@ -27,11 +27,17 @@ using PlannerContextPtr = std::shared_ptr<PlannerContext>;
* During actions build, there is special handling for following functions:
* 1. Aggregate functions are added in actions dag as INPUT nodes. Aggregate functions arguments are not added.
* 2. For function `in` and its variants, already collected sets from planner context are used.
* 3. When building actions that use CONSTANT nodes, by default we ignore pre-existing INPUTs if those don't have
* a column (a const column always has a column). This is for compatibility with previous headers. We disable this
* behaviour when we explicitly want to override CONSTANT nodes with the input (resolving InterpolateNode for example)
*/
class PlannerActionsVisitor
{
public:
explicit PlannerActionsVisitor(const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_ = true);
explicit PlannerActionsVisitor(
const PlannerContextPtr & planner_context_,
bool use_column_identifier_as_action_node_name_ = true,
bool always_use_const_column_for_constant_nodes_ = true);

/** Add actions necessary to calculate expression node into expression dag.
* Necessary actions are not added in actions dag output.
@@ -42,6 +48,7 @@ public:
private:
const PlannerContextPtr planner_context;
bool use_column_identifier_as_action_node_name = true;
bool always_use_const_column_for_constant_nodes = true;
};

/** Calculate query tree expression node action dag name and add them into node to name map.
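To make point 3 above concrete, a condensed sketch of the fallback that addConstantIfNecessary performs (taken from the PlannerActionsVisitor.cpp hunk earlier in this commit) when the requested name belongs to an INPUT node without a column:

    /// Register the constant under "<name>_dupped" instead of reusing the column-less input.
    String dupped_name{node_name + "_dupped"};
    if (node_name_to_node.find(dupped_name) == node_name_to_node.end())
        node_name_to_node[dupped_name] = &actions_dag.addColumn(column);
    return dupped_name;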
@@ -109,7 +109,7 @@ void ScatterByPartitionTransform::generateOutputChunks()
hash.reset(num_rows);

for (const auto & column_number : key_columns)
columns[column_number]->updateWeakHash32(hash);
hash.update(columns[column_number]->getWeakHash32());

const auto & hash_data = hash.getData();
IColumn::Selector selector(num_rows);
@@ -155,6 +155,10 @@ void printExceptionWithRespectToAbort(LoggerPtr log, const String & query_id)
{
std::rethrow_exception(ex);
}
catch (const TestException &) // NOLINT
{
/// Exception from a unit test, ignore it.
}
catch (const Exception & e)
{
NOEXCEPT_SCOPE({
@@ -34,7 +34,7 @@ public:

auto choice = distribution(generator);
if (choice == 0)
throw std::runtime_error("Unlucky...");
throw TestException();

return false;
}
@@ -48,7 +48,7 @@ public:
{
auto choice = distribution(generator);
if (choice == 0)
throw std::runtime_error("Unlucky...");
throw TestException();
}

Priority getPriority() const override { return {}; }
@@ -69,9 +69,7 @@ ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tre
bool allow_where = true;
if (const auto * join_node = join_tree->as<JoinNode>())
{
if (join_node->getStrictness() != JoinStrictness::All)
allow_where = false;
else if (join_node->getKind() == JoinKind::Left)
if (join_node->getKind() == JoinKind::Left)
allow_where = join_node->getLeftTableExpression()->isEqual(*table_expression);
else if (join_node->getKind() == JoinKind::Right)
allow_where = join_node->getRightTableExpression()->isEqual(*table_expression);
@ -3,8 +3,13 @@ import time
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
from shutil import copy2
|
||||
from create_release import PackageDownloader, ReleaseInfo, ShellRunner
|
||||
from ci_utils import WithIter
|
||||
from create_release import (
|
||||
PackageDownloader,
|
||||
ReleaseInfo,
|
||||
ReleaseContextManager,
|
||||
ReleaseProgress,
|
||||
)
|
||||
from ci_utils import WithIter, Shell
|
||||
|
||||
|
||||
class MountPointApp(metaclass=WithIter):
|
||||
@ -76,19 +81,20 @@ class R2MountPoint:
|
||||
)
|
||||
|
||||
_TEST_MOUNT_CMD = f"mount | grep -q {self.MOUNT_POINT}"
|
||||
ShellRunner.run(_CLEAN_LOG_FILE_CMD)
|
||||
ShellRunner.run(_UNMOUNT_CMD)
|
||||
ShellRunner.run(_MKDIR_CMD)
|
||||
ShellRunner.run(_MKDIR_FOR_CACHE)
|
||||
ShellRunner.run(self.mount_cmd, async_=self.async_mount)
|
||||
Shell.run(_CLEAN_LOG_FILE_CMD)
|
||||
Shell.run(_UNMOUNT_CMD)
|
||||
Shell.run(_MKDIR_CMD)
|
||||
Shell.run(_MKDIR_FOR_CACHE)
|
||||
# didn't manage to use simple run() and not block or fail
|
||||
Shell.run_as_daemon(self.mount_cmd)
|
||||
if self.async_mount:
|
||||
time.sleep(3)
|
||||
ShellRunner.run(_TEST_MOUNT_CMD)
|
||||
Shell.run(_TEST_MOUNT_CMD, check=True)
|
||||
|
||||
@classmethod
|
||||
def teardown(cls):
|
||||
print(f"Unmount [{cls.MOUNT_POINT}]")
|
||||
ShellRunner.run(f"umount {cls.MOUNT_POINT}")
|
||||
Shell.run(f"umount {cls.MOUNT_POINT}")
|
||||
|
||||
|
||||
class RepoCodenames(metaclass=WithIter):
|
||||
@ -124,8 +130,8 @@ class DebianArtifactory:
|
||||
cmd = f"{REPREPRO_CMD_PREFIX} includedeb {self.codename} {' '.join(paths)}"
|
||||
print("Running export command:")
|
||||
print(f" {cmd}")
|
||||
ShellRunner.run(cmd)
|
||||
ShellRunner.run("sync")
|
||||
Shell.run(cmd, check=True)
|
||||
Shell.run("sync")
|
||||
|
||||
if self.codename == RepoCodenames.LTS:
|
||||
packages_with_version = [
|
||||
@ -137,16 +143,20 @@ class DebianArtifactory:
|
||||
cmd = f"{REPREPRO_CMD_PREFIX} copy {RepoCodenames.STABLE} {RepoCodenames.LTS} {' '.join(packages_with_version)}"
|
||||
print("Running copy command:")
|
||||
print(f" {cmd}")
|
||||
ShellRunner.run(cmd)
|
||||
ShellRunner.run("sync")
|
||||
Shell.run(cmd, check=True)
|
||||
Shell.run("sync")
|
||||
|
||||
def test_packages(self):
|
||||
ShellRunner.run("docker pull ubuntu:latest")
|
||||
Shell.run("docker pull ubuntu:latest")
|
||||
print(f"Test packages installation, version [{self.version}]")
|
||||
cmd = f"docker run --rm ubuntu:latest bash -c \"apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-client={self.version}\""
|
||||
debian_command = f"echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-common-static={self.version} clickhouse-client={self.version}"
|
||||
cmd = f'docker run --rm ubuntu:latest bash -c "apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; {debian_command}"'
|
||||
print("Running test command:")
|
||||
print(f" {cmd}")
|
||||
ShellRunner.run(cmd)
|
||||
Shell.run(cmd, check=True)
|
||||
release_info = ReleaseInfo.from_file()
|
||||
release_info.debian_command = debian_command
|
||||
release_info.dump()
|
||||
|
||||
|
||||
def _copy_if_not_exists(src: Path, dst: Path) -> Path:
|
||||
@ -202,23 +212,27 @@ class RpmArtifactory:
|
||||
for command in commands:
|
||||
print("Running command:")
|
||||
print(f" {command}")
|
||||
ShellRunner.run(command)
|
||||
Shell.run(command, check=True)
|
||||
|
||||
update_public_key = f"gpg --armor --export {self._SIGN_KEY}"
|
||||
pub_key_path = dest_dir / "repodata" / "repomd.xml.key"
|
||||
print("Updating repomd.xml.key")
|
||||
pub_key_path.write_text(ShellRunner.run(update_public_key)[1])
|
||||
pub_key_path.write_text(Shell.run(update_public_key, check=True))
|
||||
if codename == RepoCodenames.LTS:
|
||||
self.export_packages(RepoCodenames.STABLE)
|
||||
ShellRunner.run("sync")
|
||||
Shell.run("sync")
|
||||
|
||||
def test_packages(self):
|
||||
ShellRunner.run("docker pull fedora:latest")
|
||||
Shell.run("docker pull fedora:latest")
|
||||
print(f"Test package installation, version [{self.version}]")
|
||||
cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1"'
|
||||
rpm_command = f"dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1"
|
||||
cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command}"'
|
||||
print("Running test command:")
|
||||
print(f" {cmd}")
|
||||
ShellRunner.run(cmd)
|
||||
Shell.run(cmd, check=True)
|
||||
release_info = ReleaseInfo.from_file()
|
||||
release_info.rpm_command = rpm_command
|
||||
release_info.dump()
|
||||
|
||||
|
||||
class TgzArtifactory:
|
||||
@ -256,23 +270,29 @@ class TgzArtifactory:
|
||||
|
||||
if codename == RepoCodenames.LTS:
|
||||
self.export_packages(RepoCodenames.STABLE)
|
||||
ShellRunner.run("sync")
|
||||
Shell.run("sync")
|
||||
|
||||
def test_packages(self):
|
||||
tgz_file = "/tmp/tmp.tgz"
|
||||
tgz_sha_file = "/tmp/tmp.tgz.sha512"
|
||||
ShellRunner.run(
|
||||
f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz"
|
||||
cmd = f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz"
|
||||
Shell.run(
|
||||
cmd,
|
||||
check=True,
|
||||
)
|
||||
ShellRunner.run(
|
||||
f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512"
|
||||
Shell.run(
|
||||
f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512",
|
||||
check=True,
|
||||
)
|
||||
expected_checksum = ShellRunner.run(f"cut -d ' ' -f 1 {tgz_sha_file}")
|
||||
actual_checksum = ShellRunner.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1")
|
||||
expected_checksum = Shell.run(f"cut -d ' ' -f 1 {tgz_sha_file}", check=True)
|
||||
actual_checksum = Shell.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1")
|
||||
assert (
|
||||
expected_checksum == actual_checksum
|
||||
), f"[{actual_checksum} != {expected_checksum}]"
|
||||
ShellRunner.run("rm /tmp/tmp.tgz*")
|
||||
Shell.run("rm /tmp/tmp.tgz*")
|
||||
release_info = ReleaseInfo.from_file()
|
||||
release_info.tgz_command = cmd
|
||||
release_info.dump()
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
@ -280,12 +300,6 @@ def parse_args() -> argparse.Namespace:
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
|
||||
description="Adds release packages to the repository",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--infile",
|
||||
type=str,
|
||||
required=True,
|
||||
help="input file with release info",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--export-debian",
|
||||
action="store_true",
|
||||
@ -328,7 +342,7 @@ if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
assert args.dry_run
|
||||
|
||||
release_info = ReleaseInfo.from_file(args.infile)
|
||||
release_info = ReleaseInfo.from_file()
|
||||
"""
|
||||
Use S3FS. RCLONE has some errors with r2 remote which I didn't figure out how to resolve:
|
||||
ERROR : IO error: NotImplemented: versionId not implemented
|
||||
@ -336,20 +350,26 @@ if __name__ == "__main__":
|
||||
"""
|
||||
mp = R2MountPoint(MountPointApp.S3FS, dry_run=args.dry_run)
|
||||
if args.export_debian:
|
||||
with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_DEB) as _:
|
||||
mp.init()
|
||||
DebianArtifactory(release_info, dry_run=args.dry_run).export_packages()
|
||||
mp.teardown()
|
||||
if args.export_rpm:
|
||||
with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_RPM) as _:
|
||||
mp.init()
|
||||
RpmArtifactory(release_info, dry_run=args.dry_run).export_packages()
|
||||
mp.teardown()
|
||||
if args.export_tgz:
|
||||
with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_TGZ) as _:
|
||||
mp.init()
|
||||
TgzArtifactory(release_info, dry_run=args.dry_run).export_packages()
|
||||
mp.teardown()
|
||||
if args.test_debian:
|
||||
with ReleaseContextManager(release_progress=ReleaseProgress.TEST_DEB) as _:
|
||||
DebianArtifactory(release_info, dry_run=args.dry_run).test_packages()
|
||||
if args.test_tgz:
|
||||
with ReleaseContextManager(release_progress=ReleaseProgress.TEST_TGZ) as _:
|
||||
TgzArtifactory(release_info, dry_run=args.dry_run).test_packages()
|
||||
if args.test_rpm:
|
||||
with ReleaseContextManager(release_progress=ReleaseProgress.TEST_RPM) as _:
|
||||
RpmArtifactory(release_info, dry_run=args.dry_run).test_packages()
|
||||
|
@ -1,17 +1,17 @@
|
||||
import argparse
|
||||
from datetime import timedelta, datetime
|
||||
import logging
|
||||
import dataclasses
|
||||
import json
|
||||
import os
|
||||
from commit_status_helper import get_commit_filtered_statuses
|
||||
import sys
|
||||
from typing import List
|
||||
|
||||
from get_robot_token import get_best_robot_token
|
||||
from github_helper import GitHub
|
||||
from release import Release, Repo as ReleaseRepo, RELEASE_READY_STATUS
|
||||
from ci_utils import Shell
|
||||
from env_helper import GITHUB_REPOSITORY
|
||||
from report import SUCCESS
|
||||
from ssh import SSHKey
|
||||
|
||||
LOGGER_NAME = __name__
|
||||
HELPER_LOGGERS = ["github_helper", LOGGER_NAME]
|
||||
logger = logging.getLogger(LOGGER_NAME)
|
||||
from ci_buddy import CIBuddy
|
||||
from ci_config import CI
|
||||
|
||||
|
||||
def parse_args():
|
||||
@ -21,120 +21,198 @@ def parse_args():
|
||||
)
|
||||
parser.add_argument("--token", help="GitHub token, if not set, used from smm")
|
||||
parser.add_argument(
|
||||
"--repo", default="ClickHouse/ClickHouse", help="Repo owner/name"
|
||||
)
|
||||
parser.add_argument("--dry-run", action="store_true", help="Do not create anything")
|
||||
parser.add_argument(
|
||||
"--release-after-days",
|
||||
type=int,
|
||||
default=3,
|
||||
help="Do automatic release on the latest green commit after the latest "
|
||||
"release if the newest release is older than the specified days",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--debug-helpers",
|
||||
"--post-status",
|
||||
action="store_true",
|
||||
help="Add debug logging for this script and github_helper",
|
||||
help="Post release branch statuses",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--remote-protocol",
|
||||
"-p",
|
||||
default="ssh",
|
||||
choices=ReleaseRepo.VALID,
|
||||
help="repo protocol for git commands remote, 'origin' is a special case and "
|
||||
"uses 'origin' as a remote",
|
||||
"--post-auto-release-complete",
|
||||
action="store_true",
|
||||
help="Post autorelease completion status",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--prepare",
|
||||
action="store_true",
|
||||
help="Prepare autorelease info",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--wf-status",
|
||||
type=str,
|
||||
default="",
|
||||
help="overall workflow status [success|failure]",
|
||||
)
|
||||
return parser.parse_args(), parser
|
||||
|
||||
|
||||
MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE = 5
|
||||
AUTORELEASE_INFO_FILE = "/tmp/autorelease_info.json"
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class ReleaseParams:
|
||||
ready: bool
|
||||
ci_status: str
|
||||
num_patches: int
|
||||
release_branch: str
|
||||
commit_sha: str
|
||||
commits_to_branch_head: int
|
||||
latest: bool
|
||||
|
||||
def to_dict(self):
|
||||
return dataclasses.asdict(self)
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class AutoReleaseInfo:
|
||||
releases: List[ReleaseParams]
|
||||
|
||||
def add_release(self, release_params: ReleaseParams) -> None:
|
||||
self.releases.append(release_params)
|
||||
|
||||
def dump(self):
|
||||
print(f"Dump release info into [{AUTORELEASE_INFO_FILE}]")
|
||||
with open(AUTORELEASE_INFO_FILE, "w", encoding="utf-8") as f:
|
||||
print(json.dumps(dataclasses.asdict(self), indent=2), file=f)
|
||||
|
||||
@staticmethod
|
||||
def from_file() -> "AutoReleaseInfo":
|
||||
with open(AUTORELEASE_INFO_FILE, "r", encoding="utf-8") as json_file:
|
||||
res = json.load(json_file)
|
||||
releases = [ReleaseParams(**release) for release in res["releases"]]
|
||||
return AutoReleaseInfo(releases=releases)
|
||||
|
||||
|
||||
def _prepare(token):
|
||||
assert len(token) > 10
|
||||
os.environ["GH_TOKEN"] = token
|
||||
Shell.run("gh auth status", check=True)
|
||||
|
||||
gh = GitHub(token)
|
||||
prs = gh.get_release_pulls(GITHUB_REPOSITORY)
|
||||
prs.sort(key=lambda x: x.head.ref)
|
||||
branch_names = [pr.head.ref for pr in prs]
|
||||
print(f"Found release branches [{branch_names}]")
|
||||
|
||||
repo = gh.get_repo(GITHUB_REPOSITORY)
|
||||
autoRelease_info = AutoReleaseInfo(releases=[])
|
||||
|
||||
for pr in prs:
|
||||
print(f"\nChecking PR [{pr.head.ref}]")
|
||||
|
||||
refs = list(repo.get_git_matching_refs(f"tags/v{pr.head.ref}"))
|
||||
assert refs
|
||||
|
||||
refs.sort(key=lambda ref: ref.ref)
|
||||
latest_release_tag_ref = refs[-1]
|
||||
latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha)
|
||||
|
||||
commits = Shell.run(
|
||||
f"git rev-list --first-parent {latest_release_tag.tag}..origin/{pr.head.ref}",
|
||||
check=True,
|
||||
).split("\n")
|
||||
commit_num = len(commits)
|
||||
print(
|
||||
f"Previous release [{latest_release_tag.tag}] was [{commit_num}] commits ago, date [{latest_release_tag.tagger.date}]"
|
||||
)
|
||||
|
||||
return parser.parse_args()
|
||||
commits_to_check = commits[:-1] # Exclude the version bump commit
|
||||
commit_sha = ""
|
||||
commit_ci_status = ""
|
||||
commits_to_branch_head = 0
|
||||
|
||||
for idx, commit in enumerate(
|
||||
commits_to_check[:MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE]
|
||||
):
|
||||
print(
|
||||
f"Check commit [{commit}] [{pr.head.ref}~{idx+1}] as release candidate"
|
||||
)
|
||||
commit_num -= 1
|
||||
|
||||
is_completed = CI.GHActions.check_wf_completed(
|
||||
token=token, commit_sha=commit
|
||||
)
|
||||
if not is_completed:
|
||||
print(f"CI is in progress for [{commit}] - check previous commit")
|
||||
commits_to_branch_head += 1
|
||||
continue
|
||||
|
||||
commit_ci_status = CI.GHActions.get_commit_status_by_name(
|
||||
token=token,
|
||||
commit_sha=commit,
|
||||
status_name=(CI.JobNames.BUILD_CHECK, "ClickHouse build check"),
|
||||
)
|
||||
commit_sha = commit
|
||||
if commit_ci_status == SUCCESS:
|
||||
break
|
||||
|
||||
print(f"CI status [{commit_ci_status}] - skip")
|
||||
commits_to_branch_head += 1
|
||||
|
||||
ready = False
|
||||
if commit_ci_status == SUCCESS and commit_sha:
|
||||
print(
|
||||
f"Add release ready info for commit [{commit_sha}] and release branch [{pr.head.ref}]"
|
||||
)
|
||||
ready = True
|
||||
else:
|
||||
print(f"WARNING: No ready commits found for release branch [{pr.head.ref}]")
|
||||
|
||||
autoRelease_info.add_release(
|
||||
ReleaseParams(
|
||||
release_branch=pr.head.ref,
|
||||
commit_sha=commit_sha,
|
||||
ready=ready,
|
||||
ci_status=commit_ci_status,
|
||||
num_patches=commit_num,
|
||||
commits_to_branch_head=commits_to_branch_head,
|
||||
latest=False,
|
||||
)
|
||||
)
|
||||
|
||||
if autoRelease_info.releases:
|
||||
autoRelease_info.releases[-1].latest = True
|
||||
|
||||
autoRelease_info.dump()
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
if args.debug_helpers:
|
||||
for logger_name in HELPER_LOGGERS:
|
||||
logging.getLogger(logger_name).setLevel(logging.DEBUG)
|
||||
args, parser = parse_args()
|
||||
|
||||
token = args.token or get_best_robot_token()
|
||||
days_as_timedelta = timedelta(days=args.release_after_days)
|
||||
now = datetime.now()
|
||||
|
||||
gh = GitHub(token)
|
||||
prs = gh.get_release_pulls(args.repo)
|
||||
branch_names = [pr.head.ref for pr in prs]
|
||||
|
||||
logger.info("Found release branches: %s\n ", " \n".join(branch_names))
|
||||
repo = gh.get_repo(args.repo)
|
||||
|
||||
# In general there is no guarantee on which order the refs/commits are
|
||||
# returned from the API, so we have to order them.
|
||||
for pr in prs:
|
||||
logger.info("Checking PR %s", pr.head.ref)
|
||||
|
||||
refs = list(repo.get_git_matching_refs(f"tags/v{pr.head.ref}"))
|
||||
refs.sort(key=lambda ref: ref.ref)
|
||||
|
||||
latest_release_tag_ref = refs[-1]
|
||||
latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha)
|
||||
logger.info("That last release was done at %s", latest_release_tag.tagger.date)
|
||||
|
||||
if latest_release_tag.tagger.date + days_as_timedelta > now:
|
||||
logger.info(
|
||||
"Not enough days since the last release %s,"
|
||||
" no automatic release can be done",
|
||||
latest_release_tag.tag,
|
||||
if args.post_status:
|
||||
info = AutoReleaseInfo.from_file()
|
||||
for release_info in info.releases:
|
||||
if release_info.ready:
|
||||
CIBuddy(dry_run=False).post_info(
|
||||
title=f"Auto Release Status for {release_info.release_branch}",
|
||||
body=release_info.to_dict(),
|
||||
)
|
||||
continue
|
||||
|
||||
unreleased_commits = list(
|
||||
repo.get_commits(sha=pr.head.ref, since=latest_release_tag.tagger.date)
|
||||
else:
|
||||
CIBuddy(dry_run=False).post_warning(
|
||||
title=f"Auto Release Status for {release_info.release_branch}",
|
||||
body=release_info.to_dict(),
|
||||
)
|
||||
unreleased_commits.sort(
|
||||
key=lambda commit: commit.commit.committer.date, reverse=True
|
||||
if args.post_auto_release_complete:
|
||||
assert args.wf_status, "--wf-status Required with --post-auto-release-complete"
|
||||
if args.wf_status != SUCCESS:
|
||||
CIBuddy(dry_run=False).post_job_error(
|
||||
error_description="Autorelease workflow failed",
|
||||
job_name="Autorelease",
|
||||
with_instance_info=False,
|
||||
with_wf_link=True,
|
||||
critical=True,
|
||||
)
|
||||
|
||||
for commit in unreleased_commits:
|
||||
logger.info("Checking statuses of commit %s", commit.sha)
|
||||
statuses = get_commit_filtered_statuses(commit)
|
||||
all_success = all(st.state == SUCCESS for st in statuses)
|
||||
passed_ready_for_release_check = any(
|
||||
st.context == RELEASE_READY_STATUS and st.state == SUCCESS
|
||||
for st in statuses
|
||||
else:
|
||||
CIBuddy(dry_run=False).post_info(
|
||||
title=f"Autorelease completed",
|
||||
body="",
|
||||
with_wf_link=True,
|
||||
)
|
||||
if not (all_success and passed_ready_for_release_check):
|
||||
logger.info("Commit is not green, thus not suitable for release")
|
||||
continue
|
||||
|
||||
logger.info("Commit is ready for release, let's release!")
|
||||
|
||||
release = Release(
|
||||
ReleaseRepo(args.repo, args.remote_protocol),
|
||||
commit.sha,
|
||||
"patch",
|
||||
args.dry_run,
|
||||
True,
|
||||
)
|
||||
try:
|
||||
release.do(True, True, True)
|
||||
except:
|
||||
if release.has_rollback:
|
||||
logging.error(
|
||||
"!!The release process finished with error, read the output carefully!!"
|
||||
)
|
||||
logging.error(
|
||||
"Probably, rollback finished with error. "
|
||||
"If you don't see any of the following commands in the output, "
|
||||
"execute them manually:"
|
||||
)
|
||||
release.log_rollback()
|
||||
raise
|
||||
logging.info("New release is done!")
|
||||
break
|
||||
elif args.prepare:
|
||||
_prepare(token=args.token or get_best_robot_token())
|
||||
else:
|
||||
parser.print_help()
|
||||
sys.exit(2)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if os.getenv("ROBOT_CLICKHOUSE_SSH_KEY", ""):
|
||||
with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"):
|
||||
main()
|
||||
else:
|
||||
main()
|
||||
|
@ -1110,13 +1110,14 @@ def main() -> int:
|
||||
ci_cache.print_status()
|
||||
|
||||
if IS_CI and not pr_info.is_merge_queue:
|
||||
# wait for pending jobs to be finished, await_jobs is a long blocking call
|
||||
ci_cache.await_pending_jobs(pr_info.is_release)
|
||||
|
||||
if pr_info.is_release:
|
||||
print("Release/master: CI Cache add pending records for all todo jobs")
|
||||
ci_cache.push_pending_all(pr_info.is_release)
|
||||
|
||||
# wait for pending jobs to be finished, await_jobs is a long blocking call
|
||||
ci_cache.await_pending_jobs(pr_info.is_release)
|
||||
|
||||
# conclude results
|
||||
result["git_ref"] = git_ref
|
||||
result["version"] = version
|
||||
@ -1292,10 +1293,11 @@ def main() -> int:
|
||||
pass
|
||||
if Utils.is_killed_with_oom():
|
||||
print("WARNING: OOM while job execution")
|
||||
print(subprocess.run("sudo dmesg -T", check=False))
|
||||
error_description = f"Out Of Memory, exit_code {job_report.exit_code}"
|
||||
else:
|
||||
error_description = f"Unknown, exit_code {job_report.exit_code}"
|
||||
CIBuddy().post_error(
|
||||
CIBuddy().post_job_error(
|
||||
error_description + f" after {int(job_report.duration)}s",
|
||||
job_name=_get_ext_check_name(args.job_name),
|
||||
)
|
||||
|
@ -1,5 +1,6 @@
|
||||
import json
|
||||
import os
|
||||
from typing import Union, Dict
|
||||
|
||||
import boto3
|
||||
import requests
|
||||
@ -60,14 +61,64 @@ class CIBuddy:
|
||||
except Exception as e:
|
||||
print(f"ERROR: Failed to post message, ex {e}")
|
||||
|
||||
def post_error(self, error_description, job_name="", with_instance_info=True):
|
||||
def _post_formatted(
|
||||
self, title: str, body: Union[Dict, str], with_wf_link: bool
|
||||
) -> None:
|
||||
message = title
|
||||
if isinstance(body, dict):
|
||||
for name, value in body.items():
|
||||
if "commit_sha" in name:
|
||||
value = (
|
||||
f"<https://github.com/{self.repo}/commit/{value}|{value[:8]}>"
|
||||
)
|
||||
message += f" *{name}*: {value}\n"
|
||||
else:
|
||||
message += body + "\n"
|
||||
run_id = os.getenv("GITHUB_RUN_ID", "")
|
||||
if with_wf_link and run_id:
|
||||
message += f" *workflow*: <https://github.com/{self.repo}/actions/runs/{run_id}|{run_id}>\n"
|
||||
self.post(message)
|
||||
|
||||
def post_info(
|
||||
self, title: str, body: Union[Dict, str], with_wf_link: bool = True
|
||||
) -> None:
|
||||
title_extended = f":white_circle: *{title}*\n\n"
|
||||
self._post_formatted(title_extended, body, with_wf_link)
|
||||
|
||||
def post_done(
|
||||
self, title: str, body: Union[Dict, str], with_wf_link: bool = True
|
||||
) -> None:
|
||||
title_extended = f":white_check_mark: *{title}*\n\n"
|
||||
self._post_formatted(title_extended, body, with_wf_link)
|
||||
|
||||
def post_warning(
|
||||
self, title: str, body: Union[Dict, str], with_wf_link: bool = True
|
||||
) -> None:
|
||||
title_extended = f":warning: *{title}*\n\n"
|
||||
self._post_formatted(title_extended, body, with_wf_link)
|
||||
|
||||
def post_critical(
|
||||
self, title: str, body: Union[Dict, str], with_wf_link: bool = True
|
||||
) -> None:
|
||||
title_extended = f":black_circle: *{title}*\n\n"
|
||||
self._post_formatted(title_extended, body, with_wf_link)
|
||||
|
||||
def post_job_error(
|
||||
self,
|
||||
error_description: str,
|
||||
job_name: str = "",
|
||||
with_instance_info: bool = True,
|
||||
with_wf_link: bool = True,
|
||||
critical: bool = False,
|
||||
) -> None:
|
||||
instance_id, instance_type = "unknown", "unknown"
|
||||
if with_instance_info:
|
||||
instance_id = Shell.run("ec2metadata --instance-id") or instance_id
|
||||
instance_type = Shell.run("ec2metadata --instance-type") or instance_type
|
||||
if not job_name:
|
||||
job_name = os.getenv("CHECK_NAME", "unknown")
|
||||
line_err = f":red_circle: *Error: {error_description}*\n\n"
|
||||
sign = ":red_circle:" if not critical else ":black_circle:"
|
||||
line_err = f"{sign} *Error: {error_description}*\n\n"
|
||||
line_ghr = f" *Runner:* `{instance_type}`, `{instance_id}`\n"
|
||||
line_job = f" *Job:* `{job_name}`\n"
|
||||
line_pr_ = f" *PR:* <https://github.com/{self.repo}/pull/{self.pr_number}|#{self.pr_number}>, <{self.commit_url}|{self.sha}>\n"
|
||||
@ -82,10 +133,13 @@ class CIBuddy:
|
||||
message += line_pr_
|
||||
else:
|
||||
message += line_br_
|
||||
run_id = os.getenv("GITHUB_RUN_ID", "")
|
||||
if with_wf_link and run_id:
|
||||
message += f" *workflow*: <https://github.com/{self.repo}/actions/runs/{run_id}|{run_id}>\n"
|
||||
self.post(message)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# test
|
||||
buddy = CIBuddy(dry_run=True)
|
||||
buddy.post_error("TEst")
|
||||
buddy.post_job_error("TEst")
|
||||
|
@ -638,7 +638,14 @@ class CiCache:
|
||||
pushes pending records for all jobs that are supposed to be run
|
||||
"""
|
||||
for job, job_config in self.jobs_to_do.items():
|
||||
if not job_config.has_digest():
|
||||
if (
|
||||
job in self.jobs_to_wait
|
||||
or not job_config.has_digest()
|
||||
or job_config.disable_await
|
||||
):
|
||||
# 1. "job in self.jobs_to_wait" - this job already has a pending record in cache
|
||||
# 2. "not job_config.has_digest()" - cache is not used for these jobs
|
||||
# 3. "job_config.disable_await" - await is explicitly disabled
|
||||
continue
|
||||
pending_state = PendingState(time.time(), run_url=GITHUB_RUN_URL)
|
||||
assert job_config.batches
|
||||
@ -708,7 +715,7 @@ class CiCache:
|
||||
Filter is to be applied in PRs to remove jobs that are not affected by the change
|
||||
:return:
|
||||
"""
|
||||
remove_from_to_do = []
|
||||
remove_from_workflow = []
|
||||
required_builds = []
|
||||
has_test_jobs_to_skip = False
|
||||
for job_name, job_config in self.jobs_to_do.items():
|
||||
@ -723,26 +730,41 @@ class CiCache:
|
||||
job=reference_name,
|
||||
job_config=reference_config,
|
||||
):
|
||||
remove_from_to_do.append(job_name)
|
||||
remove_from_workflow.append(job_name)
|
||||
has_test_jobs_to_skip = True
|
||||
else:
|
||||
required_builds += (
|
||||
job_config.required_builds if job_config.required_builds else []
|
||||
)
|
||||
if has_test_jobs_to_skip:
|
||||
# If there are tests to skip, it means build digest has not been changed.
|
||||
# If there are tests to skip, it means builds are not affected as well.
|
||||
# No need to test builds. Let's keep all builds required for test jobs and skip the others
|
||||
for job_name, job_config in self.jobs_to_do.items():
|
||||
if CI.is_build_job(job_name):
|
||||
if job_name not in required_builds:
|
||||
remove_from_to_do.append(job_name)
|
||||
remove_from_workflow.append(job_name)
|
||||
|
||||
for job in remove_from_to_do:
|
||||
for job in remove_from_workflow:
|
||||
print(f"Filter job [{job}] - not affected by the change")
|
||||
if job in self.jobs_to_do:
|
||||
del self.jobs_to_do[job]
|
||||
if job in self.jobs_to_wait:
|
||||
del self.jobs_to_wait[job]
|
||||
if job in self.jobs_to_skip:
|
||||
self.jobs_to_skip.remove(job)
|
||||
|
||||
# special handling for the special job: BUILD_CHECK
|
||||
has_builds = False
|
||||
for job in list(self.jobs_to_do) + self.jobs_to_skip:
|
||||
if CI.is_build_job(job):
|
||||
has_builds = True
|
||||
break
|
||||
if not has_builds:
|
||||
if CI.JobNames.BUILD_CHECK in self.jobs_to_do:
|
||||
print(
|
||||
f"Filter job [{CI.JobNames.BUILD_CHECK}] - no builds are required in the workflow"
|
||||
)
|
||||
del self.jobs_to_do[CI.JobNames.BUILD_CHECK]
|
||||
|
||||
def await_pending_jobs(self, is_release: bool, dry_run: bool = False) -> None:
|
||||
"""
|
||||
@ -884,3 +906,87 @@ class CiCache:
|
||||
self.jobs_to_wait[job] = job_config
|
||||
|
||||
return self
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# for testing
|
||||
job_digest = {
|
||||
"package_release": "bbbd3519d1",
|
||||
"package_aarch64": "bbbd3519d1",
|
||||
"package_asan": "bbbd3519d1",
|
||||
"package_ubsan": "bbbd3519d1",
|
||||
"package_tsan": "bbbd3519d1",
|
||||
"package_msan": "bbbd3519d1",
|
||||
"package_debug": "bbbd3519d1",
|
||||
"package_release_coverage": "bbbd3519d1",
|
||||
"binary_release": "bbbd3519d1",
|
||||
"binary_tidy": "bbbd3519d1",
|
||||
"binary_darwin": "bbbd3519d1",
|
||||
"binary_aarch64": "bbbd3519d1",
|
||||
"binary_aarch64_v80compat": "bbbd3519d1",
|
||||
"binary_freebsd": "bbbd3519d1",
|
||||
"binary_darwin_aarch64": "bbbd3519d1",
|
||||
"binary_ppc64le": "bbbd3519d1",
|
||||
"binary_amd64_compat": "bbbd3519d1",
|
||||
"binary_amd64_musl": "bbbd3519d1",
|
||||
"binary_riscv64": "bbbd3519d1",
|
||||
"binary_s390x": "bbbd3519d1",
|
||||
"binary_loongarch64": "bbbd3519d1",
|
||||
"Builds": "f5dffeecb8",
|
||||
"Install packages (release)": "ba0c89660e",
|
||||
"Install packages (aarch64)": "ba0c89660e",
|
||||
"Stateful tests (asan)": "32a9a1aba9",
|
||||
"Stateful tests (tsan)": "32a9a1aba9",
|
||||
"Stateful tests (msan)": "32a9a1aba9",
|
||||
"Stateful tests (ubsan)": "32a9a1aba9",
|
||||
"Stateful tests (debug)": "32a9a1aba9",
|
||||
"Stateful tests (release)": "32a9a1aba9",
|
||||
"Stateful tests (coverage)": "32a9a1aba9",
|
||||
"Stateful tests (aarch64)": "32a9a1aba9",
|
||||
"Stateful tests (release, ParallelReplicas)": "32a9a1aba9",
|
||||
"Stateful tests (debug, ParallelReplicas)": "32a9a1aba9",
|
||||
"Stateless tests (asan)": "deb6778b88",
|
||||
"Stateless tests (tsan)": "deb6778b88",
|
||||
"Stateless tests (msan)": "deb6778b88",
|
||||
"Stateless tests (ubsan)": "deb6778b88",
|
||||
"Stateless tests (debug)": "deb6778b88",
|
||||
"Stateless tests (release)": "deb6778b88",
|
||||
"Stateless tests (coverage)": "deb6778b88",
|
||||
"Stateless tests (aarch64)": "deb6778b88",
|
||||
"Stateless tests (release, old analyzer, s3, DatabaseReplicated)": "deb6778b88",
|
||||
"Stateless tests (debug, s3 storage)": "deb6778b88",
|
||||
"Stateless tests (tsan, s3 storage)": "deb6778b88",
|
||||
"Stress test (debug)": "aa298abf10",
|
||||
"Stress test (tsan)": "aa298abf10",
|
||||
"Upgrade check (debug)": "5ce4d3ee02",
|
||||
"Integration tests (asan, old analyzer)": "42e58be3aa",
|
||||
"Integration tests (tsan)": "42e58be3aa",
|
||||
"Integration tests (aarch64)": "42e58be3aa",
|
||||
"Integration tests flaky check (asan)": "42e58be3aa",
|
||||
"Compatibility check (release)": "ecb69d8c4b",
|
||||
"Compatibility check (aarch64)": "ecb69d8c4b",
|
||||
"Unit tests (release)": "09d00b702e",
|
||||
"Unit tests (asan)": "09d00b702e",
|
||||
"Unit tests (msan)": "09d00b702e",
|
||||
"Unit tests (tsan)": "09d00b702e",
|
||||
"Unit tests (ubsan)": "09d00b702e",
|
||||
"AST fuzzer (debug)": "c38ebf947f",
|
||||
"AST fuzzer (asan)": "c38ebf947f",
|
||||
"AST fuzzer (msan)": "c38ebf947f",
|
||||
"AST fuzzer (tsan)": "c38ebf947f",
|
||||
"AST fuzzer (ubsan)": "c38ebf947f",
|
||||
"Stateless tests flaky check (asan)": "deb6778b88",
|
||||
"Performance Comparison (release)": "a8a7179258",
|
||||
"ClickBench (release)": "45c07c4aa6",
|
||||
"ClickBench (aarch64)": "45c07c4aa6",
|
||||
"Docker server image": "6a24d5b187",
|
||||
"Docker keeper image": "6a24d5b187",
|
||||
"Docs check": "4764154c62",
|
||||
"Fast test": "cb269133f2",
|
||||
"Style check": "ffffffffff",
|
||||
"Stateful tests (ubsan, ParallelReplicas)": "32a9a1aba9",
|
||||
"Stress test (msan)": "aa298abf10",
|
||||
"Upgrade check (asan)": "5ce4d3ee02",
|
||||
}
|
||||
ci_cache = CiCache(job_digests=job_digest, cache_enabled=True, s3=S3Helper())
|
||||
ci_cache.update()
|
||||
|
@ -32,6 +32,9 @@ class CI:
|
||||
from ci_definitions import MQ_JOBS as MQ_JOBS
|
||||
from ci_definitions import WorkflowStages as WorkflowStages
|
||||
from ci_definitions import Runners as Runners
|
||||
from ci_utils import Envs as Envs
|
||||
from ci_utils import Utils as Utils
|
||||
from ci_utils import GHActions as GHActions
|
||||
from ci_definitions import Labels as Labels
|
||||
from ci_definitions import TRUSTED_CONTRIBUTORS as TRUSTED_CONTRIBUTORS
|
||||
from ci_utils import CATEGORY_TO_LABEL as CATEGORY_TO_LABEL
|
||||
@ -310,13 +313,13 @@ class CI:
|
||||
required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2
|
||||
),
|
||||
JobNames.STATELESS_TEST_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_TSAN], num_batches=2
|
||||
required_builds=[BuildNames.PACKAGE_TSAN], num_batches=4
|
||||
),
|
||||
JobNames.STATELESS_TEST_MSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_MSAN], num_batches=3
|
||||
required_builds=[BuildNames.PACKAGE_MSAN], num_batches=4
|
||||
),
|
||||
JobNames.STATELESS_TEST_UBSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_UBSAN], num_batches=1
|
||||
required_builds=[BuildNames.PACKAGE_UBSAN], num_batches=2
|
||||
),
|
||||
JobNames.STATELESS_TEST_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=2
|
||||
@ -325,24 +328,24 @@ class CI:
|
||||
required_builds=[BuildNames.PACKAGE_RELEASE],
|
||||
),
|
||||
JobNames.STATELESS_TEST_RELEASE_COVERAGE: CommonJobConfigs.STATELESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_RELEASE_COVERAGE], num_batches=5
|
||||
required_builds=[BuildNames.PACKAGE_RELEASE_COVERAGE], num_batches=6
|
||||
),
|
||||
JobNames.STATELESS_TEST_AARCH64: CommonJobConfigs.STATELESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_AARCH64],
|
||||
runner_type=Runners.FUNC_TESTER_ARM,
|
||||
),
|
||||
JobNames.STATELESS_TEST_OLD_ANALYZER_S3_REPLICATED_RELEASE: CommonJobConfigs.STATELESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=3
|
||||
required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=4
|
||||
),
|
||||
JobNames.STATELESS_TEST_S3_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=2
|
||||
),
|
||||
JobNames.STATELESS_TEST_AZURE_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2, release_only=True
|
||||
required_builds=[BuildNames.PACKAGE_ASAN], num_batches=3, release_only=True
|
||||
),
|
||||
JobNames.STATELESS_TEST_S3_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_TSAN],
|
||||
num_batches=3,
|
||||
num_batches=4,
|
||||
),
|
||||
JobNames.STRESS_TEST_DEBUG: CommonJobConfigs.STRESS_TEST.with_properties(
|
||||
required_builds=[BuildNames.PACKAGE_DEBUG],
|
||||
|
@ -351,6 +351,8 @@ class JobConfig:
|
||||
run_by_label: str = ""
|
||||
# to run always regardless of the job digest or/and label
|
||||
run_always: bool = False
|
||||
# disables CI await for a given job
|
||||
disable_await: bool = False
|
||||
# if the job needs to be run on the release branch, including master (building packages, docker server).
|
||||
# NOTE: Subsequent runs on the same branch with the similar digest are still considered skip-able.
|
||||
required_on_release_branch: bool = False
|
||||
@ -395,6 +397,7 @@ class CommonJobConfigs:
|
||||
],
|
||||
),
|
||||
runner_type=Runners.STYLE_CHECKER_ARM,
|
||||
disable_await=True,
|
||||
)
|
||||
COMPATIBILITY_TEST = JobConfig(
|
||||
job_name_keyword="compatibility",
|
||||
@ -430,7 +433,7 @@ class CommonJobConfigs:
|
||||
),
|
||||
run_command='functional_test_check.py "$CHECK_NAME"',
|
||||
runner_type=Runners.FUNC_TESTER,
|
||||
timeout=7200,
|
||||
timeout=9000,
|
||||
)
|
||||
STATEFUL_TEST = JobConfig(
|
||||
job_name_keyword="stateful",
|
||||
|
@ -1,9 +1,16 @@
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import time
|
||||
from contextlib import contextmanager
|
||||
from pathlib import Path
|
||||
from typing import Any, Iterator, List, Union, Optional, Tuple
|
||||
from typing import Any, Iterator, List, Union, Optional, Sequence, Tuple
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
class Envs:
|
||||
GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")
|
||||
|
||||
|
||||
LABEL_CATEGORIES = {
|
||||
@ -80,6 +87,71 @@ class GHActions:
|
||||
print(line)
|
||||
print("::endgroup::")
|
||||
|
||||
@staticmethod
|
||||
def get_commit_status_by_name(
|
||||
token: str, commit_sha: str, status_name: Union[str, Sequence]
|
||||
) -> str:
|
||||
assert len(token) == 40
|
||||
assert len(commit_sha) == 40
|
||||
assert is_hex(commit_sha)
|
||||
assert not is_hex(token)
|
||||
url = f"https://api.github.com/repos/{Envs.GITHUB_REPOSITORY}/commits/{commit_sha}/statuses?per_page={200}"
|
||||
headers = {
|
||||
"Authorization": f"token {token}",
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
response = requests.get(url, headers=headers, timeout=5)
|
||||
|
||||
if isinstance(status_name, str):
|
||||
status_name = (status_name,)
|
||||
if response.status_code == 200:
|
||||
assert "next" not in response.links, "Response truncated"
|
||||
statuses = response.json()
|
||||
for status in statuses:
|
||||
if status["context"] in status_name:
|
||||
return status["state"] # type: ignore
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def check_wf_completed(token: str, commit_sha: str) -> bool:
|
||||
headers = {
|
||||
"Authorization": f"token {token}",
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
url = f"https://api.github.com/repos/{Envs.GITHUB_REPOSITORY}/commits/{commit_sha}/check-runs?per_page={100}"
|
||||
|
||||
for i in range(3):
|
||||
try:
|
||||
response = requests.get(url, headers=headers, timeout=5)
|
||||
response.raise_for_status()
|
||||
# assert "next" not in response.links, "Response truncated"
|
||||
|
||||
data = response.json()
|
||||
assert data["check_runs"], "?"
|
||||
|
||||
for check in data["check_runs"]:
|
||||
if check["status"] != "completed":
|
||||
print(
|
||||
f" Check workflow status: Check not completed [{check['name']}]"
|
||||
)
|
||||
return False
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"ERROR: exception after attempt [{i}]: {e}")
|
||||
time.sleep(1)
|
||||
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def get_pr_url_by_branch(repo, branch):
|
||||
get_url_cmd = (
|
||||
f"gh pr list --repo {repo} --head {branch} --json url --jq '.[0].url'"
|
||||
)
|
||||
url = Shell.run(get_url_cmd)
|
||||
if not url:
|
||||
print(f"ERROR: PR nor found, branch [{branch}]")
|
||||
return url
|
||||
|
||||
|
||||
class Shell:
|
||||
@classmethod
|
||||
@ -95,7 +167,11 @@ class Shell:
|
||||
return res.stdout.strip()
|
||||
|
||||
@classmethod
|
||||
def run(cls, command):
|
||||
def run(cls, command, check=False, dry_run=False):
|
||||
if dry_run:
|
||||
print(f"Dry-ryn. Would run command [{command}]")
|
||||
return ""
|
||||
print(f"Run command [{command}]")
|
||||
res = ""
|
||||
result = subprocess.run(
|
||||
command,
|
||||
@ -107,12 +183,24 @@ class Shell:
|
||||
)
|
||||
if result.returncode == 0:
|
||||
res = result.stdout
|
||||
else:
|
||||
print(
|
||||
f"ERROR: stdout {result.stdout.strip()}, stderr {result.stderr.strip()}"
|
||||
)
|
||||
if check:
|
||||
assert result.returncode == 0
|
||||
return res.strip()
|
||||
|
||||
@classmethod
|
||||
def run_as_daemon(cls, command):
|
||||
print(f"Run daemon command [{command}]")
|
||||
subprocess.Popen(command.split(" ")) # pylint:disable=consider-using-with
|
||||
return 0, ""
|
||||
|
||||
@classmethod
|
||||
def check(cls, command):
|
||||
result = subprocess.run(
|
||||
command + " 2>&1",
|
||||
command,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
|
@ -2,7 +2,6 @@ import argparse
|
||||
import dataclasses
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from contextlib import contextmanager
|
||||
from copy import copy
|
||||
@ -13,7 +12,8 @@ from git_helper import Git, GIT_PREFIX
|
||||
from ssh import SSHAgent
|
||||
from env_helper import GITHUB_REPOSITORY, S3_BUILDS_BUCKET
|
||||
from s3_helper import S3Helper
|
||||
from ci_utils import Shell
|
||||
from ci_utils import Shell, GHActions
|
||||
from ci_buddy import CIBuddy
|
||||
from version_helper import (
|
||||
FILE_WITH_VERSION_PATH,
|
||||
GENERATED_CONTRIBUTORS,
|
||||
@ -27,34 +27,65 @@ from ci_config import CI
|
||||
|
||||
CMAKE_PATH = get_abs_path(FILE_WITH_VERSION_PATH)
|
||||
CONTRIBUTORS_PATH = get_abs_path(GENERATED_CONTRIBUTORS)
|
||||
RELEASE_INFO_FILE = "/tmp/release_info.json"
|
||||
|
||||
|
||||
class ShellRunner:
|
||||
class ReleaseProgress:
|
||||
STARTED = "started"
|
||||
DOWNLOAD_PACKAGES = "download packages"
|
||||
PUSH_RELEASE_TAG = "push release tag"
|
||||
PUSH_NEW_RELEASE_BRANCH = "push new release branch"
|
||||
BUMP_VERSION = "bump version"
|
||||
CREATE_GH_RELEASE = "create GH release"
|
||||
EXPORT_TGZ = "export TGZ packages"
|
||||
EXPORT_RPM = "export RPM packages"
|
||||
EXPORT_DEB = "export DEB packages"
|
||||
TEST_TGZ = "test TGZ packages"
|
||||
TEST_RPM = "test RPM packages"
|
||||
TEST_DEB = "test DEB packages"
|
||||
|
||||
@classmethod
|
||||
def run(
|
||||
cls, command, check_retcode=True, print_output=True, async_=False, dry_run=False
|
||||
):
|
||||
if dry_run:
|
||||
print(f"Dry-run: Would run shell command: [{command}]")
|
||||
return 0, ""
|
||||
print(f"Running shell command: [{command}]")
|
||||
if async_:
|
||||
subprocess.Popen(command.split(" ")) # pylint:disable=consider-using-with
|
||||
return 0, ""
|
||||
result = subprocess.run(
|
||||
command + " 2>&1",
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
check=True,
|
||||
)
|
||||
if print_output:
|
||||
print(result.stdout)
|
||||
if check_retcode:
|
||||
assert result.returncode == 0, f"Return code [{result.returncode}]"
|
||||
return result.returncode, result.stdout
|
||||
|
||||
class ReleaseProgressDescription:
|
||||
OK = "OK"
|
||||
FAILED = "FAILED"
|
||||
|
||||
|
||||
class ReleaseContextManager:
|
||||
def __init__(self, release_progress):
|
||||
self.release_progress = release_progress
|
||||
self.release_info = None
|
||||
|
||||
def __enter__(self):
|
||||
if self.release_progress == ReleaseProgress.STARTED:
|
||||
# create initial release info
|
||||
self.release_info = ReleaseInfo(
|
||||
release_branch="NA",
|
||||
commit_sha=args.ref,
|
||||
release_tag="NA",
|
||||
version="NA",
|
||||
codename="NA",
|
||||
previous_release_tag="NA",
|
||||
previous_release_sha="NA",
|
||||
release_progress=ReleaseProgress.STARTED,
|
||||
).dump()
|
||||
else:
|
||||
# fetch release info from fs and update
|
||||
self.release_info = ReleaseInfo.from_file()
|
||||
assert self.release_info
|
||||
assert (
|
||||
self.release_info.progress_description == ReleaseProgressDescription.OK
|
||||
), "Must be OK on the start of new context"
|
||||
self.release_info.release_progress = self.release_progress
|
||||
self.release_info.dump()
|
||||
return self.release_info
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
assert self.release_info
|
||||
if exc_type is not None:
|
||||
self.release_info.progress_description = ReleaseProgressDescription.FAILED
|
||||
else:
|
||||
self.release_info.progress_description = ReleaseProgressDescription.OK
|
||||
self.release_info.dump()
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
@ -67,17 +98,29 @@ class ReleaseInfo:
|
||||
codename: str
|
||||
previous_release_tag: str
|
||||
previous_release_sha: str
|
||||
changelog_pr: str = ""
|
||||
version_bump_pr: str = ""
|
||||
release_url: str = ""
|
||||
debian_command: str = ""
|
||||
rpm_command: str = ""
|
||||
tgz_command: str = ""
|
||||
docker_command: str = ""
|
||||
release_progress: str = ""
|
||||
progress_description: str = ""
|
||||
|
||||
@staticmethod
|
||||
def from_file(file_path: str) -> "ReleaseInfo":
|
||||
with open(file_path, "r", encoding="utf-8") as json_file:
|
||||
def from_file() -> "ReleaseInfo":
|
||||
with open(RELEASE_INFO_FILE, "r", encoding="utf-8") as json_file:
|
||||
res = json.load(json_file)
|
||||
return ReleaseInfo(**res)
|
||||
|
||||
@staticmethod
|
||||
def prepare(commit_ref: str, release_type: str, outfile: str) -> None:
|
||||
Path(outfile).parent.mkdir(parents=True, exist_ok=True)
|
||||
Path(outfile).unlink(missing_ok=True)
|
||||
def dump(self):
|
||||
print(f"Dump release info into [{RELEASE_INFO_FILE}]")
|
||||
with open(RELEASE_INFO_FILE, "w", encoding="utf-8") as f:
|
||||
print(json.dumps(dataclasses.asdict(self), indent=2), file=f)
|
||||
return self
|
||||
|
||||
def prepare(self, commit_ref: str, release_type: str) -> "ReleaseInfo":
|
||||
version = None
|
||||
release_branch = None
|
||||
release_tag = None
|
||||
@ -87,11 +130,12 @@ class ReleaseInfo:
|
||||
assert release_type in ("patch", "new")
|
||||
if release_type == "new":
|
||||
# check commit_ref is right and on a right branch
|
||||
ShellRunner.run(
|
||||
f"git merge-base --is-ancestor origin/{commit_ref} origin/master"
|
||||
Shell.run(
|
||||
f"git merge-base --is-ancestor origin/{commit_ref} origin/master",
|
||||
check=True,
|
||||
)
|
||||
with checkout(commit_ref):
|
||||
_, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}")
|
||||
commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True)
|
||||
# Git() must be inside "with checkout" contextmanager
|
||||
git = Git()
|
||||
version = get_version_from_repo(git=git)
|
||||
@ -112,7 +156,7 @@ class ReleaseInfo:
|
||||
assert previous_release_sha
|
||||
if release_type == "patch":
|
||||
with checkout(commit_ref):
|
||||
_, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}")
|
||||
commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True)
|
||||
# Git() must be inside "with checkout" contextmanager
|
||||
git = Git()
|
||||
version = get_version_from_repo(git=git)
|
||||
@ -120,10 +164,11 @@ class ReleaseInfo:
|
||||
version.with_description(codename)
|
||||
release_branch = f"{version.major}.{version.minor}"
|
||||
release_tag = version.describe
|
||||
ShellRunner.run(f"{GIT_PREFIX} fetch origin {release_branch} --tags")
|
||||
Shell.run(f"{GIT_PREFIX} fetch origin {release_branch} --tags", check=True)
|
||||
# check commit is right and on a right branch
|
||||
ShellRunner.run(
|
||||
f"git merge-base --is-ancestor {commit_ref} origin/{release_branch}"
|
||||
Shell.run(
|
||||
f"git merge-base --is-ancestor {commit_ref} origin/{release_branch}",
|
||||
check=True,
|
||||
)
|
||||
if version.patch == 1:
|
||||
expected_version = copy(version)
|
||||
@ -162,22 +207,22 @@ class ReleaseInfo:
|
||||
and version
|
||||
and codename in ("lts", "stable")
|
||||
)
|
||||
res = ReleaseInfo(
|
||||
release_branch=release_branch,
|
||||
commit_sha=commit_sha,
|
||||
release_tag=release_tag,
|
||||
version=version.string,
|
||||
codename=codename,
|
||||
previous_release_tag=previous_release_tag,
|
||||
previous_release_sha=previous_release_sha,
|
||||
)
|
||||
with open(outfile, "w", encoding="utf-8") as f:
|
||||
print(json.dumps(dataclasses.asdict(res), indent=2), file=f)
|
||||
|
||||
self.release_branch = release_branch
|
||||
self.commit_sha = commit_sha
|
||||
self.release_tag = release_tag
|
||||
self.version = version.string
|
||||
self.codename = codename
|
||||
self.previous_release_tag = previous_release_tag
|
||||
self.previous_release_sha = previous_release_sha
|
||||
self.release_progress = ReleaseProgress.STARTED
|
||||
self.progress_description = ReleaseProgressDescription.OK
|
||||
return self
|
||||
|
||||
def push_release_tag(self, dry_run: bool) -> None:
|
||||
if dry_run:
|
||||
# remove locally created tag from prev run
|
||||
ShellRunner.run(
|
||||
Shell.run(
|
||||
f"{GIT_PREFIX} tag -l | grep -q {self.release_tag} && git tag -d {self.release_tag} ||:"
|
||||
)
|
||||
# Create release tag
|
||||
@ -185,16 +230,17 @@ class ReleaseInfo:
|
||||
f"Create and push release tag [{self.release_tag}], commit [{self.commit_sha}]"
|
||||
)
|
||||
tag_message = f"Release {self.release_tag}"
|
||||
ShellRunner.run(
|
||||
f"{GIT_PREFIX} tag -a -m '{tag_message}' {self.release_tag} {self.commit_sha}"
|
||||
Shell.run(
|
||||
f"{GIT_PREFIX} tag -a -m '{tag_message}' {self.release_tag} {self.commit_sha}",
|
||||
check=True,
|
||||
)
|
||||
cmd_push_tag = f"{GIT_PREFIX} push origin {self.release_tag}:{self.release_tag}"
|
||||
ShellRunner.run(cmd_push_tag, dry_run=dry_run)
|
||||
Shell.run(cmd_push_tag, dry_run=dry_run, check=True)
|
||||
|
||||
@staticmethod
|
||||
def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None:
|
||||
cmd = f"gh api repos/{GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}"
|
||||
ShellRunner.run(cmd, dry_run=dry_run)
|
||||
Shell.run(cmd, dry_run=dry_run, check=True)
|
||||
|
||||
def push_new_release_branch(self, dry_run: bool) -> None:
|
||||
assert (
|
||||
@ -211,8 +257,8 @@ class ReleaseInfo:
|
||||
), f"Unexpected current version in git, must precede [{self.version}] by one step, actual [{version.string}]"
|
||||
if dry_run:
|
||||
# remove locally created branch from prev run
|
||||
ShellRunner.run(
|
||||
f"{GIT_PREFIX} branch -l | grep -q {new_release_branch} && git branch -d {new_release_branch} ||:"
|
||||
Shell.run(
|
||||
f"{GIT_PREFIX} branch -l | grep -q {new_release_branch} && git branch -d {new_release_branch}"
|
||||
)
|
||||
print(
|
||||
f"Create and push new release branch [{new_release_branch}], commit [{self.commit_sha}]"
|
||||
@ -225,7 +271,7 @@ class ReleaseInfo:
|
||||
cmd_push_branch = (
|
||||
f"{GIT_PREFIX} push --set-upstream origin {new_release_branch}"
|
||||
)
|
||||
ShellRunner.run(cmd_push_branch, dry_run=dry_run)
|
||||
Shell.run(cmd_push_branch, dry_run=dry_run, check=True)
|
||||
|
||||
print("Create and push backport tags for new release branch")
|
||||
ReleaseInfo._create_gh_label(
|
||||
@ -234,12 +280,13 @@ class ReleaseInfo:
|
||||
ReleaseInfo._create_gh_label(
|
||||
f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run
|
||||
)
|
||||
ShellRunner.run(
|
||||
Shell.run(
|
||||
f"""gh pr create --repo {GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}'
|
||||
--head {new_release_branch} {pr_labels}
|
||||
--body 'This pull request is part of the ClickHouse release cycle. It is used by the CI system only. Do not make any changes to it.'
|
||||
""",
|
||||
dry_run=dry_run,
|
||||
check=True,
|
||||
)
|
||||
|
||||
def update_version_and_contributors_list(self, dry_run: bool) -> None:
|
||||
@ -265,32 +312,52 @@ class ReleaseInfo:
|
||||
body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md")
|
||||
actor = os.getenv("GITHUB_ACTOR", "") or "me"
|
||||
cmd_create_pr = f"gh pr create --repo {GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body-file '{body_file} --label 'do not test' --assignee @{actor}"
|
||||
ShellRunner.run(cmd_commit_version_upd, dry_run=dry_run)
|
||||
ShellRunner.run(cmd_push_branch, dry_run=dry_run)
|
||||
ShellRunner.run(cmd_create_pr, dry_run=dry_run)
|
||||
Shell.run(cmd_commit_version_upd, check=True, dry_run=dry_run)
|
||||
Shell.run(cmd_push_branch, check=True, dry_run=dry_run)
|
||||
Shell.run(cmd_create_pr, check=True, dry_run=dry_run)
|
||||
if dry_run:
|
||||
ShellRunner.run(
|
||||
f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'"
|
||||
)
|
||||
ShellRunner.run(
|
||||
Shell.run(f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'")
|
||||
Shell.run(
|
||||
f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'"
|
||||
)
|
||||
self.version_bump_pr = GHActions.get_pr_url_by_branch(
|
||||
repo=GITHUB_REPOSITORY, branch=branch_upd_version_contributors
|
||||
)
|
||||
|
||||
def update_release_info(self, dry_run: bool) -> "ReleaseInfo":
|
||||
branch = f"auto/{release_info.release_tag}"
|
||||
if not dry_run:
|
||||
url = GHActions.get_pr_url_by_branch(repo=GITHUB_REPOSITORY, branch=branch)
|
||||
else:
|
||||
url = "dry-run"
|
||||
|
||||
print(f"ChangeLog PR url [{url}]")
|
||||
self.changelog_pr = url
|
||||
print(f"Release url [{url}]")
|
||||
self.release_url = (
|
||||
f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}"
|
||||
)
|
||||
self.docker_command = f"docker run --rm clickhouse/clickhouse:{self.release_branch} clickhouse --version"
|
||||
self.dump()
|
||||
return self
|
||||
|
||||
def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None:
|
||||
repo = os.getenv("GITHUB_REPOSITORY")
|
||||
assert repo
|
||||
cmds = []
|
||||
cmds.append(
|
||||
cmds = [
|
||||
f"gh release create --repo {repo} --title 'Release {self.release_tag}' {self.release_tag}"
|
||||
)
|
||||
]
|
||||
for file in packages_files:
|
||||
cmds.append(f"gh release upload {self.release_tag} {file}")
|
||||
if not dry_run:
|
||||
for cmd in cmds:
|
||||
ShellRunner.run(cmd)
|
||||
Shell.run(cmd, check=True)
|
||||
self.release_url = f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}"
|
||||
else:
|
||||
print("Dry-run, would run commands:")
|
||||
print("\n * ".join(cmds))
|
||||
self.release_url = f"dry-run"
|
||||
self.dump()
|
||||
|
||||
|
||||
class RepoTypes:
|
||||
@ -350,7 +417,7 @@ class PackageDownloader:
|
||||
self.macos_package_files = ["clickhouse-macos", "clickhouse-macos-aarch64"]
|
||||
self.file_to_type = {}
|
||||
|
||||
ShellRunner.run(f"mkdir -p {self.LOCAL_DIR}")
|
||||
Shell.run(f"mkdir -p {self.LOCAL_DIR}")
|
||||
|
||||
for package_type in self.PACKAGE_TYPES:
|
||||
for package in self.package_names:
|
||||
@ -400,7 +467,7 @@ class PackageDownloader:
|
||||
return res
|
||||
|
||||
def run(self):
|
||||
ShellRunner.run(f"rm -rf {self.LOCAL_DIR}/*")
|
||||
Shell.run(f"rm -rf {self.LOCAL_DIR}/*")
|
||||
for package_file in (
|
||||
self.deb_package_files + self.rpm_package_files + self.tgz_package_files
|
||||
):
|
||||
@ -473,6 +540,37 @@ class PackageDownloader:
|
||||
return True
|
||||
|
||||
|
||||
@contextmanager
|
||||
def checkout(ref: str) -> Iterator[None]:
|
||||
orig_ref = Shell.run(f"{GIT_PREFIX} symbolic-ref --short HEAD", check=True)
|
||||
rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
|
||||
assert orig_ref
|
||||
if ref not in (orig_ref,):
|
||||
Shell.run(f"{GIT_PREFIX} checkout {ref}")
|
||||
try:
|
||||
yield
|
||||
except (Exception, KeyboardInterrupt) as e:
|
||||
print(f"ERROR: Exception [{e}]")
|
||||
Shell.run(rollback_cmd)
|
||||
raise
|
||||
Shell.run(rollback_cmd)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def checkout_new(ref: str) -> Iterator[None]:
|
||||
orig_ref = Shell.run(f"{GIT_PREFIX} symbolic-ref --short HEAD", check=True)
|
||||
rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
|
||||
assert orig_ref
|
||||
Shell.run(f"{GIT_PREFIX} checkout -b {ref}", check=True)
|
||||
try:
|
||||
yield
|
||||
except (Exception, KeyboardInterrupt) as e:
|
||||
print(f"ERROR: Exception [{e}]")
|
||||
Shell.run(rollback_cmd)
|
||||
raise
|
||||
Shell.run(rollback_cmd)
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
parser = argparse.ArgumentParser(
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
|
||||
@ -508,6 +606,11 @@ def parse_args() -> argparse.Namespace:
|
||||
action="store_true",
|
||||
help="Create GH Release object and attach all packages",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--post-status",
|
||||
action="store_true",
|
||||
help="Post release status into Slack",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--ref",
|
||||
type=str,
|
||||
@ -526,55 +629,25 @@ def parse_args() -> argparse.Namespace:
|
||||
help="do not make any actual changes in the repo, just show what will be done",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--outfile",
|
||||
default="",
|
||||
type=str,
|
||||
help="output file to write json result to, if not set - stdout",
|
||||
"--set-progress-started",
|
||||
action="store_true",
|
||||
help="Set new progress step, --progress <PROGRESS STEP> must be set",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--infile",
|
||||
default="",
|
||||
"--progress",
|
||||
type=str,
|
||||
help="input file with release info",
|
||||
help="Progress step name, see @ReleaseProgress",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--set-progress-completed",
|
||||
action="store_true",
|
||||
help="Set current progress step to OK (completed)",
|
||||
)
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def checkout(ref: str) -> Iterator[None]:
|
||||
_, orig_ref = ShellRunner.run(f"{GIT_PREFIX} symbolic-ref --short HEAD")
|
||||
rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
|
||||
assert orig_ref
|
||||
if ref not in (orig_ref,):
|
||||
ShellRunner.run(f"{GIT_PREFIX} checkout {ref}")
|
||||
try:
|
||||
yield
|
||||
except (Exception, KeyboardInterrupt) as e:
|
||||
print(f"ERROR: Exception [{e}]")
|
||||
ShellRunner.run(rollback_cmd)
|
||||
raise
|
||||
ShellRunner.run(rollback_cmd)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def checkout_new(ref: str) -> Iterator[None]:
|
||||
_, orig_ref = ShellRunner.run(f"{GIT_PREFIX} symbolic-ref --short HEAD")
|
||||
rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}"
|
||||
assert orig_ref
|
||||
ShellRunner.run(f"{GIT_PREFIX} checkout -b {ref}")
|
||||
try:
|
||||
yield
|
||||
except (Exception, KeyboardInterrupt) as e:
|
||||
print(f"ERROR: Exception [{e}]")
|
||||
ShellRunner.run(rollback_cmd)
|
||||
raise
|
||||
ShellRunner.run(rollback_cmd)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
assert args.dry_run
|
||||
|
||||
# prepare ssh for git if needed
|
||||
_ssh_agent = None
|
||||
@ -586,43 +659,82 @@ if __name__ == "__main__":
|
||||
_ssh_agent.print_keys()
|
||||
|
||||
if args.prepare_release_info:
|
||||
with ReleaseContextManager(
|
||||
release_progress=ReleaseProgress.STARTED
|
||||
) as release_info:
|
||||
assert (
|
||||
args.ref and args.release_type and args.outfile
|
||||
), "--ref, --release-type and --outfile must be provided with --prepare-release-info"
|
||||
ReleaseInfo.prepare(
|
||||
commit_ref=args.ref, release_type=args.release_type, outfile=args.outfile
|
||||
)
|
||||
if args.push_release_tag:
|
||||
assert args.infile, "--infile <release info file path> must be provided"
|
||||
release_info = ReleaseInfo.from_file(args.infile)
|
||||
release_info.push_release_tag(dry_run=args.dry_run)
|
||||
if args.push_new_release_branch:
|
||||
assert args.infile, "--infile <release info file path> must be provided"
|
||||
release_info = ReleaseInfo.from_file(args.infile)
|
||||
release_info.push_new_release_branch(dry_run=args.dry_run)
|
||||
if args.create_bump_version_pr:
|
||||
# TODO: store link to PR in release info
|
||||
assert args.infile, "--infile <release info file path> must be provided"
|
||||
release_info = ReleaseInfo.from_file(args.infile)
|
||||
release_info.update_version_and_contributors_list(dry_run=args.dry_run)
|
||||
args.ref and args.release_type
|
||||
), "--ref and --release-type must be provided with --prepare-release-info"
|
||||
release_info.prepare(commit_ref=args.ref, release_type=args.release_type)
|
||||
|
||||
if args.download_packages:
|
||||
assert args.infile, "--infile <release info file path> must be provided"
|
||||
release_info = ReleaseInfo.from_file(args.infile)
|
||||
with ReleaseContextManager(
|
||||
release_progress=ReleaseProgress.DOWNLOAD_PACKAGES
|
||||
) as release_info:
|
||||
p = PackageDownloader(
|
||||
release=release_info.release_branch,
|
||||
commit_sha=release_info.commit_sha,
|
||||
version=release_info.version,
|
||||
)
|
||||
p.run()
|
||||
|
||||
if args.push_release_tag:
|
||||
with ReleaseContextManager(
|
||||
release_progress=ReleaseProgress.PUSH_RELEASE_TAG
|
||||
) as release_info:
|
||||
release_info.push_release_tag(dry_run=args.dry_run)
|
||||
|
||||
if args.push_new_release_branch:
|
||||
with ReleaseContextManager(
|
||||
release_progress=ReleaseProgress.PUSH_NEW_RELEASE_BRANCH
|
||||
) as release_info:
|
||||
release_info.push_new_release_branch(dry_run=args.dry_run)
|
||||
|
||||
if args.create_bump_version_pr:
|
||||
with ReleaseContextManager(
|
||||
release_progress=ReleaseProgress.BUMP_VERSION
|
||||
) as release_info:
|
||||
release_info.update_version_and_contributors_list(dry_run=args.dry_run)
|
||||
|
||||
if args.create_gh_release:
|
||||
assert args.infile, "--infile <release info file path> must be provided"
|
||||
release_info = ReleaseInfo.from_file(args.infile)
|
||||
with ReleaseContextManager(
|
||||
release_progress=ReleaseProgress.CREATE_GH_RELEASE
|
||||
) as release_info:
|
||||
p = PackageDownloader(
|
||||
release=release_info.release_branch,
|
||||
commit_sha=release_info.commit_sha,
|
||||
version=release_info.version,
|
||||
)
|
||||
release_info.create_gh_release(p.get_all_packages_files(), args.dry_run)
|
||||
release_info.create_gh_release(
|
||||
packages_files=p.get_all_packages_files(), dry_run=args.dry_run
|
||||
)
|
||||
|
||||
if args.post_status:
|
||||
release_info = ReleaseInfo.from_file()
|
||||
release_info.update_release_info(dry_run=args.dry_run)
|
||||
if release_info.debian_command:
|
||||
CIBuddy(dry_run=args.dry_run).post_done(
|
||||
f"New release issued", dataclasses.asdict(release_info)
|
||||
)
|
||||
else:
|
||||
CIBuddy(dry_run=args.dry_run).post_critical(
|
||||
f"Failed to issue new release", dataclasses.asdict(release_info)
|
||||
)
|
||||
|
||||
if args.set_progress_started:
|
||||
assert args.progress, "Progress step name must be provided"
ri = ReleaseInfo.from_file()
|
||||
ri.release_progress = args.progress
|
||||
ri.progress_description = ReleaseProgressDescription.FAILED
|
||||
ri.dump()
|
||||
assert args.progress, "Progress step name must be provided"
|
||||
|
||||
if args.set_progress_completed:
|
||||
ri = ReleaseInfo.from_file()
|
||||
assert (
|
||||
ri.progress_description == ReleaseProgressDescription.FAILED
|
||||
), "Must be FAILED before set to OK"
|
||||
ri.progress_description = ReleaseProgressDescription.OK
|
||||
ri.dump()
|
||||
|
||||
# tear down ssh
|
||||
if _ssh_agent and _key_pub:
|
||||
|
@ -254,11 +254,14 @@ def main():
|
||||
statuses = get_commit_filtered_statuses(commit)
|
||||
|
||||
has_failed_statuses = False
|
||||
has_native_failed_status = False
|
||||
for status in statuses:
|
||||
print(f"Check status [{status.context}], [{status.state}]")
|
||||
if CI.is_required(status.context) and status.state != SUCCESS:
|
||||
print(f"WARNING: Failed status [{status.context}], [{status.state}]")
|
||||
has_failed_statuses = True
|
||||
if status.context != CI.StatusNames.SYNC:
|
||||
has_native_failed_status = True
|
||||
|
||||
if args.wf_status == SUCCESS or has_failed_statuses:
|
||||
# set Mergeable check if workflow is successful (green)
|
||||
@ -280,7 +283,7 @@ def main():
|
||||
print(
|
||||
"Workflow failed but no failed statuses found (died runner?) - cannot set Mergeable Check status"
|
||||
)
|
||||
if args.wf_status == SUCCESS and not has_failed_statuses:
|
||||
if args.wf_status == SUCCESS and not has_native_failed_status:
|
||||
sys.exit(0)
|
||||
else:
|
||||
sys.exit(1)
|
||||
|
@ -296,9 +296,12 @@ class PRInfo:
|
||||
else:
|
||||
if "schedule" in github_event:
|
||||
self.event_type = EventType.SCHEDULE
|
||||
else:
|
||||
elif "inputs" in github_event:
|
||||
# assume this is a dispatch
|
||||
self.event_type = EventType.DISPATCH
|
||||
print("PR Info:")
|
||||
print(self)
|
||||
else:
|
||||
logging.warning(
|
||||
"event.json does not match pull_request or push:\n%s",
|
||||
json.dumps(github_event, sort_keys=True, indent=4),
|
||||
|
@ -587,11 +587,11 @@ class TestCIConfig(unittest.TestCase):
|
||||
for job, job_config in ci_cache.jobs_to_do.items():
|
||||
if job in MOCK_AFFECTED_JOBS:
|
||||
MOCK_REQUIRED_BUILDS += job_config.required_builds
|
||||
elif job not in MOCK_AFFECTED_JOBS:
|
||||
elif job not in MOCK_AFFECTED_JOBS and not job_config.disable_await:
|
||||
ci_cache.jobs_to_wait[job] = job_config
|
||||
|
||||
for job, job_config in ci_cache.jobs_to_do.items():
|
||||
if job_config.reference_job_name:
|
||||
if job_config.reference_job_name or job_config.disable_await:
|
||||
# jobs with reference_job_name in config are not supposed to have records in the cache - continue
|
||||
continue
|
||||
if job in MOCK_AFFECTED_JOBS:
|
||||
@ -624,11 +624,76 @@ class TestCIConfig(unittest.TestCase):
|
||||
+ MOCK_AFFECTED_JOBS
|
||||
+ MOCK_REQUIRED_BUILDS
|
||||
)
|
||||
self.assertTrue(
|
||||
CI.JobNames.BUILD_CHECK not in ci_cache.jobs_to_wait,
|
||||
"We must never await on Builds Report",
|
||||
)
|
||||
self.assertCountEqual(
|
||||
list(ci_cache.jobs_to_wait),
|
||||
[
|
||||
CI.JobNames.BUILD_CHECK,
|
||||
]
|
||||
+ MOCK_REQUIRED_BUILDS,
|
||||
MOCK_REQUIRED_BUILDS,
|
||||
)
|
||||
self.assertCountEqual(list(ci_cache.jobs_to_do), expected_to_do)
|
||||
|
||||
def test_ci_py_filters_not_affected_jobs_in_prs_no_builds(self):
|
||||
"""
|
||||
checks that ci.py filters out not affected jobs in PRs when no builds are required
|
||||
"""
|
||||
settings = CiSettings()
|
||||
settings.no_ci_cache = True
|
||||
pr_info = PRInfo(github_event=_TEST_EVENT_JSON)
|
||||
pr_info.event_type = EventType.PULL_REQUEST
|
||||
pr_info.number = 123
|
||||
assert pr_info.is_pr
|
||||
ci_cache = CIPY._configure_jobs(
|
||||
S3Helper(), pr_info, settings, skip_jobs=False, dry_run=True
|
||||
)
|
||||
self.assertTrue(not ci_cache.jobs_to_skip, "Must be no jobs in skip list")
|
||||
assert not ci_cache.jobs_to_wait
|
||||
assert not ci_cache.jobs_to_skip
|
||||
|
||||
MOCK_AFFECTED_JOBS = [
|
||||
CI.JobNames.FAST_TEST,
|
||||
]
|
||||
MOCK_REQUIRED_BUILDS = []
|
||||
|
||||
# pretend there are pending jobs that we need to wait for
|
||||
for job, job_config in ci_cache.jobs_to_do.items():
|
||||
if job in MOCK_AFFECTED_JOBS:
|
||||
if job_config.required_builds:
|
||||
MOCK_REQUIRED_BUILDS += job_config.required_builds
|
||||
elif job not in MOCK_AFFECTED_JOBS and not job_config.disable_await:
|
||||
ci_cache.jobs_to_wait[job] = job_config
|
||||
|
||||
for job, job_config in ci_cache.jobs_to_do.items():
|
||||
if job_config.reference_job_name or job_config.disable_await:
|
||||
# jobs with reference_job_name in config are not supposed to have records in the cache - continue
|
||||
continue
|
||||
if job in MOCK_AFFECTED_JOBS:
|
||||
continue
|
||||
for batch in range(job_config.num_batches):
|
||||
# add any record into cache
|
||||
record = CiCache.Record(
|
||||
record_type=random.choice(
|
||||
[
|
||||
CiCache.RecordType.FAILED,
|
||||
CiCache.RecordType.PENDING,
|
||||
CiCache.RecordType.SUCCESSFUL,
|
||||
]
|
||||
),
|
||||
job_name=job,
|
||||
job_digest=ci_cache.job_digests[job],
|
||||
batch=batch,
|
||||
num_batches=job_config.num_batches,
|
||||
release_branch=True,
|
||||
)
|
||||
for record_t_, records_ in ci_cache.records.items():
|
||||
if record_t_.value == CiCache.RecordType.FAILED.value:
|
||||
records_[record.to_str_key()] = record
|
||||
|
||||
ci_cache.filter_out_not_affected_jobs()
|
||||
expected_to_do = MOCK_AFFECTED_JOBS + MOCK_REQUIRED_BUILDS
|
||||
self.assertCountEqual(
|
||||
list(ci_cache.jobs_to_wait),
|
||||
MOCK_REQUIRED_BUILDS,
|
||||
)
|
||||
self.assertCountEqual(list(ci_cache.jobs_to_do), expected_to_do)
|
||||
|
@ -821,7 +821,10 @@ class SettingsRandomizer:
|
||||
get_localzone(),
|
||||
]
|
||||
),
|
||||
"prefer_warmed_unmerged_parts_seconds": lambda: random.randint(0, 10),
|
||||
# This setting affect part names and their content which can be read from tables in tests.
|
||||
# We have a lot of tests which relies on part names, so it's very unsafe to enable randomization
|
||||
# of this setting
|
||||
# "prefer_warmed_unmerged_parts_seconds": lambda: random.randint(0, 10),
|
||||
"use_page_cache_for_disks_without_file_cache": lambda: random.random() < 0.7,
|
||||
"page_cache_inject_eviction": lambda: random.random() < 0.5,
|
||||
"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability": lambda: round(
|
||||
@ -2168,7 +2171,10 @@ def run_tests_array(all_tests_with_params: Tuple[List[str], int, TestSuite, bool
|
||||
|
||||
while True:
|
||||
if all_tests:
|
||||
try:
|
||||
case = all_tests.pop(0)
|
||||
except IndexError:
|
||||
break
|
||||
else:
|
||||
break
|
||||
|
||||
@ -2474,18 +2480,16 @@ def do_run_tests(jobs, test_suite: TestSuite):
|
||||
# of failures will be nearly the same for all tests from the group.
|
||||
random.shuffle(test_suite.parallel_tests)
|
||||
|
||||
batch_size = max(1, (len(test_suite.parallel_tests) // jobs) + 1)
|
||||
parallel_tests_array = []
|
||||
for job in range(jobs):
|
||||
range_ = job * batch_size, job * batch_size + batch_size
|
||||
batch = test_suite.parallel_tests[range_[0] : range_[1]]
|
||||
parallel_tests_array.append((batch, batch_size, test_suite, True))
|
||||
batch_size = len(test_suite.parallel_tests) // jobs
|
||||
manager = multiprocessing.Manager()
|
||||
parallel_tests = manager.list()
|
||||
parallel_tests.extend(test_suite.parallel_tests)
|
||||
|
||||
processes = []
|
||||
|
||||
for test_batch in parallel_tests_array:
|
||||
for _ in range(jobs):
|
||||
process = multiprocessing.Process(
|
||||
target=run_tests_process, args=(test_batch,)
|
||||
target=run_tests_process,
|
||||
args=((parallel_tests, batch_size, test_suite, True),),
|
||||
)
|
||||
processes.append(process)
|
||||
process.start()
|
||||
|
@ -208,13 +208,21 @@ def test_merge_tree_custom_disk_setting(start_cluster):
|
||||
secret_access_key='minio123');
|
||||
"""
|
||||
)
|
||||
count = len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
|
||||
|
||||
list1 = list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))
|
||||
count1 = len(list1)
|
||||
|
||||
node1.query(f"INSERT INTO {TABLE_NAME}_3 SELECT number FROM numbers(100)")
|
||||
assert int(node1.query(f"SELECT count() FROM {TABLE_NAME}_3")) == 100
|
||||
assert (
|
||||
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
|
||||
== count
|
||||
)
|
||||
|
||||
list2 = list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))
|
||||
count2 = len(list2)
|
||||
|
||||
if count1 != count2:
|
||||
print("list1: ", list1)
|
||||
print("list2: ", list2)
|
||||
|
||||
assert count1 == count2
|
||||
assert (
|
||||
len(list(minio.list_objects(cluster.minio_bucket, "data2/", recursive=True)))
|
||||
> 0
|
||||
|
@ -2220,13 +2220,11 @@ def test_rabbitmq_commit_on_block_write(rabbitmq_cluster):
|
||||
|
||||
|
||||
def test_rabbitmq_no_connection_at_startup_1(rabbitmq_cluster):
|
||||
# no connection when table is initialized
|
||||
rabbitmq_cluster.pause_container("rabbitmq1")
|
||||
instance.query_and_get_error(
|
||||
error = instance.query_and_get_error(
|
||||
"""
|
||||
CREATE TABLE test.cs (key UInt64, value UInt64)
|
||||
ENGINE = RabbitMQ
|
||||
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
|
||||
SETTINGS rabbitmq_host_port = 'no_connection_at_startup:5672',
|
||||
rabbitmq_exchange_name = 'cs',
|
||||
rabbitmq_format = 'JSONEachRow',
|
||||
rabbitmq_flush_interval_ms=1000,
|
||||
@ -2234,7 +2232,7 @@ def test_rabbitmq_no_connection_at_startup_1(rabbitmq_cluster):
|
||||
rabbitmq_row_delimiter = '\\n';
|
||||
"""
|
||||
)
|
||||
rabbitmq_cluster.unpause_container("rabbitmq1")
|
||||
assert "CANNOT_CONNECT_RABBITMQ" in error
|
||||
|
||||
|
||||
def test_rabbitmq_no_connection_at_startup_2(rabbitmq_cluster):
|
||||
|
@ -10,8 +10,8 @@
|
||||
PARTITION BY toYYYYMM(d) ORDER BY key
|
||||
</create_query>
|
||||
|
||||
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers(5000000)</fill_query>
|
||||
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers(5000000)</fill_query>
|
||||
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers(2500000)</fill_query>
|
||||
<fill_query>INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers(2500000)</fill_query>
|
||||
|
||||
<query>SELECT * FROM optimized_select_final FINAL FORMAT Null SETTINGS max_threads = 8</query>
|
||||
<query>SELECT * FROM optimized_select_final FINAL WHERE key % 10 = 0 FORMAT Null</query>
|
||||
|
@ -1,5 +1,5 @@
|
||||
<test>
|
||||
<query>with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByChar(' ', materialize(s)) as w from numbers(1000000)</query>
|
||||
<query>with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp(' ', materialize(s)) as w from numbers(1000000)</query>
|
||||
<query>with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp('\s+', materialize(s)) as w from numbers(100000)</query>
|
||||
<query>with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp(' ', materialize(s)) as w from numbers(200000)</query>
|
||||
<query>with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp('\s+', materialize(s)) as w from numbers(20000)</query>
|
||||
</test>
|
||||
|
@ -24,10 +24,10 @@
|
||||
<min_insert_block_size_rows>1</min_insert_block_size_rows>
|
||||
</settings>
|
||||
|
||||
<!-- 100 parts -->
|
||||
<query>INSERT INTO hits_wide(UserID) SELECT rand() FROM numbers(100)</query>
|
||||
<query>INSERT INTO hits_compact(UserID) SELECT rand() FROM numbers(1000)</query>
|
||||
<query>INSERT INTO hits_buffer(UserID) SELECT rand() FROM numbers(100)</query>
|
||||
<!-- 50 parts -->
|
||||
<query>INSERT INTO hits_wide(UserID) SELECT rand() FROM numbers(50)</query>
|
||||
<query>INSERT INTO hits_compact(UserID) SELECT rand() FROM numbers(500)</query>
|
||||
<query>INSERT INTO hits_buffer(UserID) SELECT rand() FROM numbers(50)</query>
|
||||
|
||||
<drop_query>DROP TABLE IF EXISTS hits_wide</drop_query>
|
||||
<drop_query>DROP TABLE IF EXISTS hits_compact</drop_query>
|
||||
|
@ -555,7 +555,7 @@ if args.report == "main":
|
||||
"Total client time for measured query runs, s", # 2
|
||||
"Queries", # 3
|
||||
"Longest query, total for measured runs, s", # 4
|
||||
"Wall clock time per query, s", # 5
|
||||
"Average query wall clock time, s", # 5
|
||||
"Shortest query, total for measured runs, s", # 6
|
||||
"", # Runs #7
|
||||
]
|
||||
|
@ -8,13 +8,13 @@
|
||||
40
|
||||
41
|
||||
|
||||
0
|
||||
41
|
||||
2 42
|
||||
|
||||
2 42
|
||||
43
|
||||
|
||||
0
|
||||
43
|
||||
11
|
||||
|
||||
11
|
||||
|
@ -12,10 +12,10 @@ ORDER BY (primary_key);
|
||||
|
||||
INSERT INTO set_array
|
||||
select
|
||||
toString(intDiv(number, 1000000)) as primary_key,
|
||||
toString(intDiv(number, 100000)) as primary_key,
|
||||
array(number) as index_array
|
||||
from system.numbers
|
||||
limit 10000000;
|
||||
limit 1000000;
|
||||
|
||||
OPTIMIZE TABLE set_array FINAL;
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
-- Tags: no-parallel
|
||||
|
||||
create table mut (n int) engine=ReplicatedMergeTree('/test/02440/{database}/mut', '1') order by tuple();
|
||||
set insert_keeper_fault_injection_probability=0;
|
||||
|
@ -1,4 +1,5 @@
|
||||
#!/usr/bin/env bash
|
||||
# Tags: no-parallel
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
|
@ -1,3 +1,3 @@
|
||||
Parquet
|
||||
e76a749f346078a6a43e0cbd25f0d18a -
|
||||
3249508141921544766
|
||||
400
|
||||
|
@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
# Tags: no-ubsan, no-fasttest
|
||||
# Tags: long, no-ubsan, no-fasttest, no-parallel, no-asan, no-msan, no-tsan
|
||||
# This test requires around 10 GB of memory and it is just too much.
|
||||
|
||||
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
@ -121,9 +122,12 @@ echo "Parquet"
|
||||
#}
|
||||
|
||||
DATA_FILE=$CUR_DIR/data_parquet/string_int_list_inconsistent_offset_multiple_batches.parquet
|
||||
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load"
|
||||
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (ints Array(Int64), strings Nullable(String)) ENGINE = Memory"
|
||||
cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet"
|
||||
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" | md5sum
|
||||
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM parquet_load"
|
||||
${CLICKHOUSE_CLIENT} --query="drop table parquet_load"
|
||||
|
||||
${CLICKHOUSE_LOCAL} --multiquery "
|
||||
DROP TABLE IF EXISTS parquet_load;
|
||||
CREATE TABLE parquet_load (ints Array(Int64), strings Nullable(String)) ENGINE = Memory;
|
||||
INSERT INTO parquet_load FROM INFILE '$DATA_FILE';
|
||||
SELECT sum(cityHash64(*)) FROM parquet_load;
|
||||
SELECT count() FROM parquet_load;
|
||||
DROP TABLE parquet_load;
|
||||
"
|
||||
|
@ -1,3 +1,6 @@
|
||||
-- Tags: no-random-merge-tree-settings, no-random-settings
|
||||
-- Because we compare part sizes, and they could be affected by index granularity and index compression settings.
|
||||
|
||||
CREATE TABLE part_log_bytes_uncompressed (
|
||||
key UInt8,
|
||||
value UInt8
|
||||
@ -17,7 +20,8 @@ ALTER TABLE part_log_bytes_uncompressed DROP PART 'all_4_4_0' SETTINGS mutations
|
||||
|
||||
SYSTEM FLUSH LOGS;
|
||||
|
||||
SELECT event_type, table, part_name, bytes_uncompressed > 0, size_in_bytes < bytes_uncompressed FROM system.part_log
|
||||
SELECT event_type, table, part_name, bytes_uncompressed > 0, (bytes_uncompressed > 0 ? (size_in_bytes < bytes_uncompressed ? '1' : toString((size_in_bytes, bytes_uncompressed))) : '0')
|
||||
FROM system.part_log
|
||||
WHERE event_date >= yesterday() AND database = currentDatabase() AND table = 'part_log_bytes_uncompressed'
|
||||
AND (event_type != 'RemovePart' OR part_name = 'all_4_4_0') -- ignore removal of other parts
|
||||
ORDER BY part_name, event_type;
|
||||
|
@ -0,0 +1,55 @@
-- { echoOn }
SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8)))
FROM system.one
GROUP BY '666';
6.666.8
SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8)))
FROM remote('127.0.0.{1,1}', 'system.one')
GROUP BY '666';
6.666.8
-- https://github.com/ClickHouse/ClickHouse/issues/63006
SELECT
6,
concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a,
concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b
FROM system.one
GROUP BY toNullable(6)
WITH ROLLUP
WITH TOTALS;
6 World666666 \N
6 World666666 \N

6 World666666 \N
SELECT
6,
concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a,
concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b
FROM remote('127.0.0.1')
GROUP BY toNullable(6)
WITH ROLLUP
WITH TOTALS;
6 World666666 \N
6 World666666 \N

6 World666666 \N
-- { echoOn }
SELECT
'%',
tuple(concat('%', 1, toLowCardinality(toLowCardinality(toNullable(materialize(1)))), currentDatabase(), 101., toNullable(13), '%AS%id_02%', toNullable(toNullable(10)), toLowCardinality(toNullable(10)), 10, 10)),
(toDecimal128(99.67, 6), 36, 61, 14)
FROM dist_03174
WHERE dummy IN (0, '255')
GROUP BY
toNullable(13),
(99.67, 61, toLowCardinality(14));
% ('%11default10113%AS%id_02%10101010') (99.67,36,61,14)
-- { echoOn }
SELECT
38,
concat(position(concat(concat(position(concat(toUInt256(3)), 'ca', 2), 3), NULLIF(1, materialize(toLowCardinality(1)))), toLowCardinality(toNullable('ca'))), concat(NULLIF(1, 1), concat(3), toNullable(3)))
FROM set_index_not__fuzz_0
GROUP BY
toNullable(3),
concat(concat(CAST(NULL, 'Nullable(Int8)'), toNullable(3)))
FORMAT Null
SETTINGS max_threads = 1, allow_experimental_analyzer = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 3, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_threads = 1;
@ -0,0 +1,80 @@
-- There are various tests that check that group by keys don't propagate into functions replacing const arguments
-- by full (empty) columns

DROP TABLE IF EXISTS dist_03174;
DROP TABLE IF EXISTS set_index_not__fuzz_0;

-- https://github.com/ClickHouse/ClickHouse/issues/63006

SET allow_experimental_analyzer=1;

-- { echoOn }
SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8)))
FROM system.one
GROUP BY '666';

SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8)))
FROM remote('127.0.0.{1,1}', 'system.one')
GROUP BY '666';

-- https://github.com/ClickHouse/ClickHouse/issues/63006
SELECT
6,
concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a,
concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b
FROM system.one
GROUP BY toNullable(6)
WITH ROLLUP
WITH TOTALS;

SELECT
6,
concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a,
concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b
FROM remote('127.0.0.1')
GROUP BY toNullable(6)
WITH ROLLUP
WITH TOTALS;

-- https://github.com/ClickHouse/ClickHouse/issues/64945
-- { echoOff }
CREATE TABLE dist_03174 AS system.one ENGINE = Distributed(test_cluster_two_shards, system, one, dummy);

-- { echoOn }
SELECT
'%',
tuple(concat('%', 1, toLowCardinality(toLowCardinality(toNullable(materialize(1)))), currentDatabase(), 101., toNullable(13), '%AS%id_02%', toNullable(toNullable(10)), toLowCardinality(toNullable(10)), 10, 10)),
(toDecimal128(99.67, 6), 36, 61, 14)
FROM dist_03174
WHERE dummy IN (0, '255')
GROUP BY
toNullable(13),
(99.67, 61, toLowCardinality(14));

-- Parallel replicas
-- { echoOff }
CREATE TABLE set_index_not__fuzz_0
(
`name` String,
`status` Enum8('alive' = 0, 'rip' = 1),
INDEX idx_status status TYPE set(2) GRANULARITY 1
)
ENGINE = MergeTree()
ORDER BY name;

INSERT INTO set_index_not__fuzz_0 SELECT * FROM generateRandom() LIMIT 10;

-- { echoOn }
SELECT
38,
concat(position(concat(concat(position(concat(toUInt256(3)), 'ca', 2), 3), NULLIF(1, materialize(toLowCardinality(1)))), toLowCardinality(toNullable('ca'))), concat(NULLIF(1, 1), concat(3), toNullable(3)))
FROM set_index_not__fuzz_0
GROUP BY
toNullable(3),
concat(concat(CAST(NULL, 'Nullable(Int8)'), toNullable(3)))
FORMAT Null
SETTINGS max_threads = 1, allow_experimental_analyzer = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 3, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_threads = 1;

-- { echoOff }
DROP TABLE IF EXISTS dist_03174;
DROP TABLE IF EXISTS set_index_not__fuzz_0;
@ -0,0 +1 @@
(1,1)
1
tests/queries/0_stateless/03204_index_hint_fuzzer.sql
Normal file
@ -0,0 +1 @@
SELECT tuple(indexHint(toLowCardinality('aaa')), 1);
@ -1,2 +1,5 @@
1 nan 1048575 2
1 1 1 1 1
() 1 nan 1048575 2

() 1 nan 1048575 2
@ -5,3 +5,5 @@ SET join_algorithm='hash';
SET allow_experimental_join_condition=1;
SELECT * FROM ( SELECT 1 AS a, toLowCardinality(1), 1) AS t1 CROSS JOIN (SELECT toLowCardinality(1 AS a), 1 AS b) AS t2;


SELECT * FROM (SELECT tuple(), 1 GROUP BY greatCircleAngle(toNullable(1048575), 257, toInt128(-9223372036854775808), materialize(1048576)) WITH TOTALS) AS t, (SELECT greatCircleAngle(toUInt256(1048575), 257, toNullable(-9223372036854775808), 1048576), 1048575, 2) AS u
@ -0,0 +1,10 @@
false 1 1
true 1 1
---
false 1 1
false 1 2
false 1 3
true 1 1
true 1 2
---
-755809149 0
@ -0,0 +1,33 @@
create table t(c Int32, d Bool) Engine=MergeTree order by c;
system stop merges t;

insert into t values (1, 0);
insert into t values (1, 0);
insert into t values (1, 1);
insert into t values (1, 0)(1, 1);

SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t qualify c8=1 order by d settings max_threads=2, allow_experimental_analyzer = 1;
SELECT '---';
SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t order by d, c8 settings max_threads=2;
SELECT '---';

drop table t;

create table t (
c Int32 primary key ,
s Bool ,
w Float64
);

system stop merges t;

insert into t values(439499072,true,0),(1393290072,true,0);
insert into t values(-1317193174,false,0),(1929066636,false,0);
insert into t values(-2,false,0),(1962246186,true,0),(2054878592,false,0);
insert into t values(-1893563136,true,41.55);
insert into t values(-1338380855,true,-0.7),(-991301833,true,0),(-755809149,false,43.18),(-41,true,0),(3,false,0),(255,false,0),(255,false,0),(189195893,false,0),(195550885,false,9223372036854776000);

SELECT * FROM (
SELECT c, min(w) OVER (PARTITION BY s ORDER BY c ASC, s ASC, w ASC)
FROM t limit toUInt64(-1))
WHERE c = -755809149;
Some files were not shown because too many files have changed in this diff.