Merge branch 'master' into better_limit_in_keeper

commit 416ec9ba69

168  .github/actions/release/action.yml  vendored  Normal file

@@ -0,0 +1,168 @@
name: Release

description: Makes patch releases and creates new release branch

inputs:
  ref:
    description: 'Git reference (branch or commit sha) from which to create the release'
    required: true
    type: string
  type:
    description: 'The type of release: "new" for a new release or "patch" for a patch release'
    required: true
    type: choice
    options:
      - patch
      - new
  dry-run:
    description: 'Dry run'
    required: false
    default: true
    type: boolean
  token:
    required: true
    type: string

runs:
  using: "composite"
  steps:
    - name: Prepare Release Info
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --prepare-release-info \
            --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
            ${{ inputs.dry-run && '--dry-run' || '' }}
        echo "::group::Release Info"
        python3 -m json.tool /tmp/release_info.json
        echo "::endgroup::"
        release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
        commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
        echo "Release Tag: $release_tag"
        echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
        echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
    - name: Download All Release Artifacts
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push Git Tag for the Release
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push New Release Branch
      if: ${{ inputs.type == 'new' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump CH Version and Update Contributors' List
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump Docker versions, Changelog, Security
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        git checkout master
        python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
        echo "List versions"
        ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
        echo "Update docker version"
        ./utils/list-versions/update-docker-version.sh
        echo "Generate ChangeLog"
        export CI=1
        docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
            --volume=".:/ClickHouse" clickhouse/style-test \
            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
            --gh-user-or-token=${{ inputs.token }} --jobs=5 \
            --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
        git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
        echo "Generate Security"
        python3 ./utils/security-generator/generate_security.py > SECURITY.md
        git diff HEAD
    - name: Create ChangeLog PR
      if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
      uses: peter-evans/create-pull-request@v6
      with:
        author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        token: ${{ inputs.token }}
        committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
        branch: auto/${{ env.RELEASE_TAG }}
        assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
        delete-branch: true
        title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
        labels: do not test
        body: |
          Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
          ### Changelog category (leave one):
          - Not for changelog (changelog entry is not required)
    - name: Complete previous steps and Restore git state
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-completed
        git reset --hard HEAD
        git checkout "$GITHUB_REF_NAME"
    - name: Create GH Release
      shell: bash
      if: ${{ inputs.type == 'patch' }}
      run: |
        python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Docker clickhouse/clickhouse-server building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        cd "./tests/ci"
        python3 ./create_release.py --set-progress-started --progress "docker server release"
        export CHECK_NAME="Docker server image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./create_release.py --set-progress-completed
    - name: Docker clickhouse/clickhouse-keeper building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        cd "./tests/ci"
        python3 ./create_release.py --set-progress-started --progress "docker keeper release"
        export CHECK_NAME="Docker keeper image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./create_release.py --set-progress-completed
    - name: Set current Release progress to Completed with OK
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Post Slack Message
      if: ${{ !cancelled() }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }}
98  .github/workflows/auto_release.yml  vendored

@@ -1,44 +1,110 @@
name: AutoRelease

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  DRY_RUN: true

concurrency:
-  group: auto-release
+  group: release
on:  # yamllint disable-line rule:truthy
-  # schedule:
-  #   - cron: '0 10-16 * * 1-5'
+  # Workflow uses a test bucket for packages and dry run mode (no real releases)
+  schedule:
+    - cron: '0 9 * * *'
+    - cron: '0 15 * * *'
  workflow_dispatch:
    inputs:
      dry-run:
        description: 'Dry run'
        required: false
        default: true
        type: boolean

jobs:
-  CherryPick:
-    runs-on: [self-hosted, style-checker-aarch64]
+  AutoRelease:
+    runs-on: [self-hosted, release-maker]
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/cherry_pick
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          REPO_OWNER=ClickHouse
          REPO_NAME=ClickHouse
          REPO_TEAM=core
          EOF
      - name: Set DRY_RUN for schedule
        if: ${{ github.event_name == 'schedule' }}
        run: echo "DRY_RUN=true" >> "$GITHUB_ENV"
      - name: Set DRY_RUN for dispatch
        if: ${{ github.event_name == 'workflow_dispatch' }}
        run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV"
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
-      - name: Auto-release
+      - name: Auto Release Prepare
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 auto_release.py --release-after-days=3
-      - name: Cleanup
-        if: always()
+          python3 auto_release.py --prepare
          echo "::group::Auto Release Info"
          python3 -m json.tool /tmp/autorelease_info.json
          echo "::endgroup::"
          {
            echo 'AUTO_RELEASE_PARAMS<<EOF'
            cat /tmp/autorelease_info.json
            echo 'EOF'
          } >> "$GITHUB_ENV"
      - name: Post Release Branch statuses
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --post-status
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Post Slack Message
        if: ${{ !cancelled() }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
      - name: Clean up
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
14  .github/workflows/backport_branches.yml  vendored

@@ -241,8 +241,9 @@ jobs:
      runner_type: stress-tester
      data: ${{ needs.RunConfig.outputs.data }}
  FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
    needs:
      - RunConfig
      - Builds_Report
      - FunctionalStatelessTestAsan
      - FunctionalStatefulTestDebug
@@ -257,6 +258,7 @@ jobs:
        with:
          clear-repository: true
      - name: Finish label
+        if: ${{ !failure() }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          # update mergeable check
@@ -264,3 +266,13 @@ jobs:
          # update overall ci report
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          python3 merge_pr.py
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
134  .github/workflows/create_release.yml  vendored

@@ -2,7 +2,6 @@ name: CreateRelease
concurrency:
  group: release

'on':
  workflow_dispatch:
    inputs:
@@ -31,136 +30,15 @@ jobs:
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
-          RELEASE_INFO_FILE=${{ runner.temp }}/release_info.json
          EOF
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
-      - name: Prepare Release Info
-        run: |
-          python3 ./tests/ci/create_release.py --prepare-release-info \
-              --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
-              --outfile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-          echo "::group::Release Info"
-          python3 -m json.tool "$RELEASE_INFO_FILE"
-          echo "::endgroup::"
-          release_tag=$(jq -r '.release_tag' "$RELEASE_INFO_FILE")
-          commit_sha=$(jq -r '.commit_sha' "$RELEASE_INFO_FILE")
-          echo "Release Tag: $release_tag"
-          echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
-          echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
-      - name: Download All Release Artifacts
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Push Git Tag for the Release
-        run: |
-          python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Push New Release Branch
-        if: ${{ inputs.type == 'new' }}
-        run: |
-          python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Bump CH Version and Update Contributors' List
-        run: |
-          python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Checkout master
-        run: |
-          git checkout master
-      - name: Bump Docker versions, Changelog, Security
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
-          echo "List versions"
-          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
-          echo "Update docker version"
-          ./utils/list-versions/update-docker-version.sh
-          echo "Generate ChangeLog"
-          export CI=1
-          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
-              --volume=".:/ClickHouse" clickhouse/style-test \
-              /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
-              --gh-user-or-token="$GH_TOKEN" --jobs=5 \
-              --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
-          git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
-          echo "Generate Security"
-          python3 ./utils/security-generator/generate_security.py > SECURITY.md
-          git diff HEAD
-      - name: Create ChangeLog PR
-        if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
-        uses: peter-evans/create-pull-request@v6
+      - name: Call Release Action
+        uses: ./.github/actions/release
        with:
-          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
          token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
-          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
-          commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
-          branch: auto/${{ env.RELEASE_TAG }}
-          assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
-          delete-branch: true
-          title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
-          labels: do not test
-          body: |
-            Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
-            ### Changelog category (leave one):
-            - Not for changelog (changelog entry is not required)
-      - name: Reset changes if Dry-run
-        if: ${{ inputs.dry-run }}
-        run: |
-          git reset --hard HEAD
-      - name: Checkout back to GITHUB_REF
-        run: |
-          git checkout "$GITHUB_REF_NAME"
-      - name: Create GH Release
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/create_release.py --create-gh-release \
-              --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}

-      - name: Export TGZ Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Test TGZ Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Export RPM Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Test RPM Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Export Debian Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Test Debian Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Docker clickhouse/clickhouse-server building
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          cd "./tests/ci"
-          export CHECK_NAME="Docker server image"
-          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
-      - name: Docker clickhouse/clickhouse-keeper building
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          cd "./tests/ci"
-          export CHECK_NAME="Docker keeper image"
-          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
-      - name: Post Slack Message
-        if: always()
-        run: |
-          echo Slack Message
+          ref: ${{ inputs.ref }}
+          type: ${{ inputs.type }}
+          dry-run: ${{ inputs.dry-run }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
38  .github/workflows/master.yml  vendored

@@ -121,34 +121,6 @@ jobs:
      runner_type: style-checker-aarch64
      data: ${{ needs.RunConfig.outputs.data }}

-  MarkReleaseReady:
-    if: ${{ !failure() && !cancelled() }}
-    needs: [RunConfig, Builds_1, Builds_2]
-    runs-on: [self-hosted, style-checker-aarch64]
-    steps:
-      - name: Debug
-        run: |
-          echo need with different filters
-          cat << 'EOF'
-          ${{ toJSON(needs) }}
-          ${{ toJSON(needs.*.result) }}
-          no failures ${{ !contains(needs.*.result, 'failure') }}
-          no skips ${{ !contains(needs.*.result, 'skipped') }}
-          no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-          EOF
-      - name: Not ready
-        # fail the job to be able to restart it
-        if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
-        run: exit 1
-      - name: Check out repository code
-        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-        uses: ClickHouse/checkout@v1
-      - name: Mark Commit Release Ready
-        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 mark_release_ready.py

  FinishCheck:
    if: ${{ !cancelled() }}
    needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
@@ -160,3 +132,13 @@ jobs:
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
13  .github/workflows/merge_queue.yml  vendored

@@ -93,7 +93,7 @@ jobs:
      data: ${{ needs.RunConfig.outputs.data }}

  CheckReadyForMerge:
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
+    if: ${{ !cancelled() }}
    # Test_2 or Test_3 must not have jobs required for Mergeable check
    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
    runs-on: [self-hosted, style-checker-aarch64]
@@ -101,6 +101,17 @@ jobs:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
+      - name: Check and set merge status
+        if: ${{ needs.StyleCheck.result == 'success' }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
17  .github/workflows/nightly.yml  vendored

@@ -44,3 +44,20 @@ jobs:
    with:
      data: "${{ needs.RunConfig.outputs.data }}"
      set_latest: true
+  CheckWorkflow:
+    if: ${{ !cancelled() }}
+    needs: [RunConfig, BuildDockers]
+    runs-on: [self-hosted, style-checker-aarch64]
+    steps:
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
13  .github/workflows/pull_request.yml  vendored

@@ -151,7 +151,7 @@ jobs:
      data: ${{ needs.RunConfig.outputs.data }}

  CheckReadyForMerge:
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
+    if: ${{ !cancelled() }}
    # Test_2 or Test_3 must not have jobs required for Mergeable check
    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1]
    runs-on: [self-hosted, style-checker-aarch64]
@@ -161,9 +161,20 @@ jobs:
        with:
          filter: tree:0
+      - name: Check and set merge status
+        if: ${{ needs.StyleCheck.result == 'success' }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status

  ################################# Stage Final #################################
  #
14  .github/workflows/release_branches.yml  vendored

@@ -441,8 +441,9 @@ jobs:
      runner_type: stress-tester
      data: ${{ needs.RunConfig.outputs.data }}
  FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
    needs:
      - RunConfig
      - DockerServerImage
      - DockerKeeperImage
      - Builds_Report
@@ -478,9 +479,20 @@ jobs:
        with:
          clear-repository: true
      - name: Finish label
+        if: ${{ !failure() }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          # update mergeable check
          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          # update overall ci report
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
@@ -14,3 +14,9 @@ rules:
  comments:
    min-spaces-from-content: 1
  document-start: disable
+  colons: disable
+  indentation: disable
+  line-length: disable
+  trailing-spaces: disable
+  truthy: disable
+  new-line-at-end-of-file: disable
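The rules above appear to belong to the repository's yamllint configuration (the file header was lost in this view; assuming the standard `.yamllint` file at the repository root, which the `yamllint` CLI picks up automatically, the workflows above can be checked locally):

```bash
# Assumes the stock yamllint CLI; the disabled rules above keep it focused
# on comment spacing while tolerating long lines, custom indentation, etc.
pip install yamllint
yamllint .github/workflows/ .github/actions/
```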
@@ -87,10 +87,13 @@
# define ASAN_POISON_MEMORY_REGION(a, b)
#endif

-#if !defined(ABORT_ON_LOGICAL_ERROR)
-#if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || defined(UNDEFINED_BEHAVIOR_SANITIZER)
-#define ABORT_ON_LOGICAL_ERROR
-#endif
+/// We used to have only ABORT_ON_LOGICAL_ERROR macro, but most of its uses were actually in places where we didn't care about logical errors
+/// but wanted to check exactly if the current build type is debug or with sanitizer. This new macro is introduced to fix those places.
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
+#    if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) \
+        || defined(UNDEFINED_BEHAVIOR_SANITIZER)
+#        define DEBUG_OR_SANITIZER_BUILD
+#    endif
#endif

/// chassert(x) is similar to assert(x), but:
@@ -101,7 +104,7 @@
/// Also it makes sense to call abort() instead of __builtin_unreachable() in debug builds,
/// because SIGABRT is easier to debug than SIGTRAP (the second one makes gdb crazy)
#if !defined(chassert)
-#if defined(ABORT_ON_LOGICAL_ERROR)
+#    if defined(DEBUG_OR_SANITIZER_BUILD)

// clang-format off
#include <base/types.h>
namespace DB
@@ -22,6 +22,21 @@ Structure of the `users` section:
    <!-- Or -->
    <password_sha256_hex></password_sha256_hex>

+   <ssh_keys>
+       <ssh_key>
+           <type>ssh-ed25519</type>
+           <base64_key>AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj</base64_key>
+       </ssh_key>
+       <ssh_key>
+           <type>ecdsa-sha2-nistp256</type>
+           <base64_key>AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNxeV2uN5UY6CUbCzTA1rXfYimKQA5ivNIqxdax4bcMXz4D0nSk2l5E1TkR5mG8EBWtmExSPbcEPJ8V7lyWWbA8=</base64_key>
+       </ssh_key>
+       <ssh_key>
+           <type>ssh-rsa</type>
+           <base64_key>AAAAB3NzaC1yc2EAAAADAQABAAABgQCpgqL1SHhPVBOTFlOm0pu+cYBbADzC2jL41sPMawYCJHDyHuq7t+htaVVh2fRgpAPmSEnLEC2d4BEIKMtPK3bfR8plJqVXlLt6Q8t4b1oUlnjb3VPA9P6iGcW7CV1FBkZQEVx8ckOfJ3F+kI5VsrRlEDgiecm/C1VPl0/9M2llW/mPUMaD65cM9nlZgM/hUeBrfxOEqM11gDYxEZm1aRSbZoY4dfdm3vzvpSQ6lrCrkjn3X2aSmaCLcOWJhfBWMovNDB8uiPuw54g3ioZ++qEQMlfxVsqXDGYhXCrsArOVuW/5RbReO79BvXqdssiYShfwo+GhQ0+aLWMIW/jgBkkqx/n7uKLzCMX7b2F+aebRYFh+/QXEj7SnihdVfr9ud6NN3MWzZ1ltfIczlEcFLrLJ1Yq57wW6wXtviWh59WvTWFiPejGjeSjjJyqqB49tKdFVFuBnIU5u/bch2DXVgiAEdQwUrIp1ACoYPq22HFFAYUJrL32y7RxX3PGzuAv3LOc=</base64_key>
+       </ssh_key>
+   </ssh_keys>

    <access_management>0|1</access_management>

    <networks incl="networks" replace="replace">
@@ -79,6 +94,24 @@ Password can be specified in plaintext or in SHA256 (hex format).

The first line of the result is the password. The second line is the corresponding double SHA1 hash.
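As a concrete sketch of producing such credentials in the shell (one common recipe; any equivalent pipeline works):

```bash
# Generate a random password, print it (line 1), then print its double SHA1 hex (line 2).
PASSWORD=$(base64 < /dev/urandom | head -c12)
echo "$PASSWORD"
echo -n "$PASSWORD" | sha1sum | tr -d ' -' | xxd -r -p | sha1sum | tr -d ' -'

# For <password_sha256_hex>, a single SHA256 is used instead:
echo -n "$PASSWORD" | sha256sum | tr -d ' -'
```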

### username/ssh-key {#user-sshkey}

This setting allows authenticating with SSH keys.

Given an SSH key (as generated by `ssh-keygen`) such as
```
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj john@example.com
```
the `ssh_key` element is expected to be
```
<ssh_key>
    <type>ssh-ed25519</type>
    <base64_key>AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj</base64_key>
</ssh_key>
```

Substitute `ssh-ed25519` with `ssh-rsa` or `ecdsa-sha2-nistp256` for the other supported algorithms.
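On the client side, `clickhouse-client` can present the matching private key when connecting; treat the exact invocation below as a sketch (the host and user are hypothetical):

```bash
# Authenticate with the private half of the key configured in <ssh_keys>.
clickhouse-client --host chnode.example.com --user john --ssh-key-file ~/.ssh/id_ed25519

# For a passphrase-protected key:
clickhouse-client --host chnode.example.com --user john \
    --ssh-key-file ~/.ssh/id_ed25519 --ssh-key-passphrase "$MY_PASSPHRASE"
```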

### access_management {#access_management-user-setting}

This setting enables or disables the use of SQL-driven [access control and account management](../../guides/sre/user-management/index.md#access-control) for the user.

@@ -297,7 +297,7 @@ The algorithm requires a special column in the tables. This column:

- Must contain an ordered sequence.
- Can be one of the following types: [Int, UInt](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md), [Decimal](../../../sql-reference/data-types/decimal.md).
-- Can’t be the only column in the `JOIN` clause.
+- For the `hash` join algorithm, it can’t be the only column in the `JOIN` clause.

Syntax `ASOF JOIN ... ON`:

@@ -337,7 +337,8 @@ For example, consider the following tables:

`ASOF JOIN` can take the timestamp of a user event from `table_1` and find an event in `table_2` where the timestamp is closest to the timestamp of the event from `table_1` corresponding to the closest match condition. Equal timestamp values are the closest if available. Here, the `user_id` column can be used for joining on equality and the `ev_time` column can be used for joining on the closest match. In our example, `event_1_1` can be joined with `event_2_1` and `event_1_2` can be joined with `event_2_3`, but `event_2_2` can’t be joined.

:::note
-`ASOF` join is **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine.
+`ASOF JOIN` is supported only by the `hash` and `full_sorting_merge` join algorithms.
+It's **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine.
:::
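To make the closest-match semantics above concrete, here is a small self-contained sketch using `clickhouse local` (the inline `values()` tables and column names are invented for the example, and it assumes the default `hash` algorithm handling of `ASOF JOIN`):

```bash
# For each left row, pick the right row with the greatest ev_time <= the left ev_time.
clickhouse local --query "
SELECT l.user_id, l.ev_time AS left_time, r.ev_time AS closest_right_time
FROM values('user_id UInt32, ev_time UInt32', (1, 10), (1, 12)) AS l
ASOF JOIN values('user_id UInt32, ev_time UInt32', (1, 9), (1, 11)) AS r
ON l.user_id = r.user_id AND l.ev_time >= r.ev_time
"
# Expected: left time 10 pairs with right time 9; left time 12 pairs with 11.
```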

## PASTE JOIN Usage

@@ -6,38 +6,38 @@ sidebar_label: Playground
# ClickHouse Playground {#clickhouse-playground}

[ClickHouse Playground](https://play.clickhouse.com/play?user=play) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster.
Several example datasets are available in Playground.

You can make queries to Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. More information about software products that support ClickHouse is available [here](../interfaces/index.md).

## Credentials {#credentials}

| Parameter           | Value                              |
|:--------------------|:-----------------------------------|
| HTTPS endpoint      | `https://play.clickhouse.com:443/` |
| Native TCP endpoint | `play.clickhouse.com:9440`         |
| User                | `explorer` or `play`               |
| Password            | (empty)                            |

## Limitations {#limitations}

The queries are executed as a read-only user. This implies some limitations:

- DDL queries are not allowed
- INSERT queries are not allowed

The service also has quotas on its usage.

## Examples {#examples}

HTTPS endpoint example with `curl`:

```bash
curl "https://play.clickhouse.com/?user=explorer" --data-binary "SELECT 'Play ClickHouse'"
```

TCP endpoint example with [CLI](../interfaces/cli.md):

```bash
clickhouse client --secure --host play.clickhouse.com --user explorer
```
@@ -68,6 +68,41 @@ QueryTreeNodePtr findEqualsFunction(const QueryTreeNodes & nodes)
    return nullptr;
}

/// Checks if the node is a combination of isNull and notEquals functions over the same two arguments
bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs, QueryTreeNodePtr & rhs)
{
    QueryTreeNodePtrWithHashSet all_arguments;
    for (const auto & node : nodes)
    {
        const auto * func_node = node->as<FunctionNode>();
        if (!func_node)
            return false;

        const auto & arguments = func_node->getArguments().getNodes();
        if (func_node->getFunctionName() == "isNull" && arguments.size() == 1)
            all_arguments.insert(QueryTreeNodePtrWithHash(arguments[0]));
        else if (func_node->getFunctionName() == "notEquals" && arguments.size() == 2)
        {
            if (arguments[0]->isEqual(*arguments[1]))
                return false;
            all_arguments.insert(QueryTreeNodePtrWithHash(arguments[0]));
            all_arguments.insert(QueryTreeNodePtrWithHash(arguments[1]));
        }
        else
            return false;

        if (all_arguments.size() > 2)
            return false;
    }

    if (all_arguments.size() != 2)
        return false;

    lhs = all_arguments.begin()->node;
    rhs = std::next(all_arguments.begin())->node;
    return true;
}

bool isBooleanConstant(const QueryTreeNodePtr & node, bool expected_value)
{
    const auto * constant_node = node->as<ConstantNode>();
@@ -213,11 +248,14 @@ private:
        else if (func_name == "and")
        {
            const auto & and_arguments = argument_function->getArguments().getNodes();
-            bool all_are_is_null = and_arguments.size() == 2 && isNodeFunction(and_arguments[0], "isNull") && isNodeFunction(and_arguments[1], "isNull");
-            if (all_are_is_null)
+            QueryTreeNodePtr is_null_lhs_arg;
+            QueryTreeNodePtr is_null_rhs_arg;
+            if (matchIsNullOfTwoArgs(and_arguments, is_null_lhs_arg, is_null_rhs_arg))
            {
-                is_null_argument_to_indices[getFunctionArgument(and_arguments.front(), 0)].push_back(or_operands.size() - 1);
-                is_null_argument_to_indices[getFunctionArgument(and_arguments.back(), 0)].push_back(or_operands.size() - 1);
+                is_null_argument_to_indices[is_null_lhs_arg].push_back(or_operands.size() - 1);
+                is_null_argument_to_indices[is_null_rhs_arg].push_back(or_operands.size() - 1);
                continue;
            }

            /// Expression `a = b AND (a IS NOT NULL) AND true AND (b IS NOT NULL)` can be replaced with `a = b`
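For readers following the pass logic: the pattern `matchIsNullOfTwoArgs` recognizes is one building block of rewriting a null-safe equality, i.e. an OR chain containing `a = b` together with `a IS NULL AND b IS NULL`. A quick illustration of that predicate's truth table (the query is invented for illustration and runnable with `clickhouse local`):

```bash
clickhouse local --query "
SELECT a, b, (a = b) OR (isNull(a) AND isNull(b)) AS null_safe_eq
FROM values('a Nullable(UInt8), b Nullable(UInt8)', (1, 1), (1, 2), (NULL, NULL), (1, NULL))
"
# 1, 1       -> 1     (equal)
# 1, 2       -> 0     (not equal)
# NULL, NULL -> 1     (both NULL counts as a match)
# 1, NULL    -> NULL  (plain equality with NULL stays NULL)
```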
@@ -62,7 +62,7 @@ namespace ErrorCodes
namespace
{

-#if defined(ABORT_ON_LOGICAL_ERROR)
+#if defined(DEBUG_OR_SANITIZER_BUILD)

/** This visitor checks if Query Tree structure is valid after each pass
  * in debug build.
@@ -183,7 +183,7 @@ void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node)
    for (size_t i = 0; i < passes_size; ++i)
    {
        passes[i]->run(query_tree_node, current_context);
-#if defined(ABORT_ON_LOGICAL_ERROR)
+#if defined(DEBUG_OR_SANITIZER_BUILD)
        ValidationChecker(passes[i]->getName()).visit(query_tree_node);
#endif
    }
@@ -208,7 +208,7 @@ void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node, size_t up_to_pass_index)
    for (size_t i = 0; i < up_to_pass_index; ++i)
    {
        passes[i]->run(query_tree_node, current_context);
-#if defined(ABORT_ON_LOGICAL_ERROR)
+#if defined(DEBUG_OR_SANITIZER_BUILD)
        ValidationChecker(passes[i]->getName()).visit(query_tree_node);
#endif
    }
@@ -4124,7 +4124,9 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo

    auto * column_to_interpolate = interpolate_node_typed.getExpression()->as<IdentifierNode>();
    if (!column_to_interpolate)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for indentifiers, but {} is found",
+        throw Exception(
+            ErrorCodes::LOGICAL_ERROR,
+            "INTERPOLATE can work only for identifiers, but {} is found",
            interpolate_node_typed.getExpression()->formatASTForErrorMessage());
    auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName();

@@ -105,7 +105,7 @@ bool compareRestoredTableDef(const IAST & restored_table_create_query, const IAS
        auto new_query = query.clone();
        adjustCreateQueryForBackup(new_query, global_context);
        ASTCreateQuery & create = typeid_cast<ASTCreateQuery &>(*new_query);
-        create.setUUID({});
+        create.resetUUIDs();
        create.if_not_exists = false;
        return new_query;
    };
@@ -1,4 +1,5 @@
#include <Backups/RestoreCoordinationLocal.h>
+#include <Parsers/ASTCreateQuery.h>
#include <Parsers/formatAST.h>
#include <Common/logger_useful.h>

@@ -67,7 +68,7 @@ void RestoreCoordinationLocal::generateUUIDForTable(ASTCreateQuery & create_quer
    auto it = create_query_uuids.find(query_str);
    if (it != create_query_uuids.end())
    {
-        create_query.setUUID(it->second);
+        it->second.copyToQuery(create_query);
        return true;
    }
    return false;
@@ -79,7 +80,8 @@ void RestoreCoordinationLocal::generateUUIDForTable(ASTCreateQuery & create_quer
        return;
    }

-    auto new_uuids = create_query.generateRandomUUID(/* always_generate_new_uuid= */ true);
+    CreateQueryUUIDs new_uuids{create_query, /* generate_random= */ true, /* force_random= */ true};
+    new_uuids.copyToQuery(create_query);

    {
        std::lock_guard lock{mutex};
@@ -1,16 +1,17 @@
#pragma once

#include <Backups/IRestoreCoordination.h>
-#include <Parsers/ASTCreateQuery.h>
+#include <Parsers/CreateQueryUUIDs.h>
#include <Common/Logger.h>
#include <mutex>
#include <set>
#include <unordered_set>

namespace Poco { class Logger; }


namespace DB
{
+class ASTCreateQuery;


/// Implementation of the IRestoreCoordination interface performing coordination in memory.
class RestoreCoordinationLocal : public IRestoreCoordination
@@ -55,7 +56,7 @@ private:

    std::set<std::pair<String /* database_zk_path */, String /* table_name */>> acquired_tables_in_replicated_databases;
    std::unordered_set<String /* table_zk_path */> acquired_data_in_replicated_tables;
-    std::unordered_map<String, ASTCreateQuery::UUIDs> create_query_uuids;
+    std::unordered_map<String, CreateQueryUUIDs> create_query_uuids;
    std::unordered_set<String /* root_zk_path */> acquired_data_in_keeper_map_tables;

    mutable std::mutex mutex;
@@ -3,6 +3,7 @@
#include <Backups/RestoreCoordinationRemote.h>
#include <Backups/BackupCoordinationStageSync.h>
#include <Parsers/ASTCreateQuery.h>
+#include <Parsers/CreateQueryUUIDs.h>
#include <Parsers/formatAST.h>
#include <Functions/UserDefined/UserDefinedSQLObjectType.h>
#include <Common/ZooKeeper/KeeperException.h>
@@ -269,7 +270,8 @@ bool RestoreCoordinationRemote::acquireInsertingDataForKeeperMap(const String &
void RestoreCoordinationRemote::generateUUIDForTable(ASTCreateQuery & create_query)
{
    String query_str = serializeAST(create_query);
-    String new_uuids_str = create_query.generateRandomUUID(/* always_generate_new_uuid= */ true).toString();
+    CreateQueryUUIDs new_uuids{create_query, /* generate_random= */ true, /* force_random= */ true};
+    String new_uuids_str = new_uuids.toString();

    auto holder = with_retries.createRetriesControlHolder("generateUUIDForTable");
    holder.retries_ctl.retryLoop(
@@ -281,11 +283,14 @@ void RestoreCoordinationRemote::generateUUIDForTable(ASTCreateQuery & create_que
        Coordination::Error res = zk->tryCreate(path, new_uuids_str, zkutil::CreateMode::Persistent);

        if (res == Coordination::Error::ZOK)
        {
+            new_uuids.copyToQuery(create_query);
            return;
        }

        if (res == Coordination::Error::ZNODEEXISTS)
        {
-            create_query.setUUID(ASTCreateQuery::UUIDs::fromString(zk->get(path)));
+            CreateQueryUUIDs::fromString(zk->get(path)).copyToQuery(create_query);
            return;
        }
@@ -267,7 +267,7 @@ bool ColumnAggregateFunction::structureEquals(const IColumn & to) const
}


-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
void ColumnAggregateFunction::insertRangeFrom(const IColumn & from, size_t start, size_t length)
#else
void ColumnAggregateFunction::doInsertRangeFrom(const IColumn & from, size_t start, size_t length)
@@ -465,7 +465,7 @@ void ColumnAggregateFunction::insertFromWithOwnership(const IColumn & from, size
    insertMergeFrom(from, n);
}

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
void ColumnAggregateFunction::insertFrom(const IColumn & from, size_t n)
#else
void ColumnAggregateFunction::doInsertFrom(const IColumn & from, size_t n)
@@ -145,7 +145,7 @@ public:

    void insertData(const char * pos, size_t length) override;

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertFrom(const IColumn & from, size_t n) override;
#else
    using IColumn::insertFrom;
@@ -189,7 +189,7 @@ public:

    void protect() override;

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertRangeFrom(const IColumn & from, size_t start, size_t length) override;
#else
    void doInsertRangeFrom(const IColumn & from, size_t start, size_t length) override;
@@ -212,7 +212,7 @@ public:

    MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    int compareAt(size_t, size_t, const IColumn &, int) const override
#else
    int doCompareAt(size_t, size_t, const IColumn &, int) const override
@@ -336,7 +336,7 @@ bool ColumnArray::tryInsert(const Field & x)
    return true;
}

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
void ColumnArray::insertFrom(const IColumn & src_, size_t n)
#else
void ColumnArray::doInsertFrom(const IColumn & src_, size_t n)
@@ -395,7 +395,7 @@ int ColumnArray::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int nan
            : 1);
}

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
int ColumnArray::compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const
#else
int ColumnArray::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const
@@ -542,7 +542,7 @@ void ColumnArray::getExtremes(Field & min, Field & max) const
}


-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
void ColumnArray::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnArray::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
@@ -84,14 +84,14 @@ public:
    void updateHashWithValue(size_t n, SipHash & hash) const override;
    WeakHash32 getWeakHash32() const override;
    void updateHashFast(SipHash & hash) const override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
    void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
    void insert(const Field & x) override;
    bool tryInsert(const Field & x) override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertFrom(const IColumn & src_, size_t n) override;
#else
    void doInsertFrom(const IColumn & src_, size_t n) override;
@@ -103,7 +103,7 @@ public:
    ColumnPtr permute(const Permutation & perm, size_t limit) const override;
    ColumnPtr index(const IColumn & indexes, size_t limit) const override;
    template <typename Type> ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
#else
    int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
@@ -86,7 +86,7 @@ public:
    bool isDefaultAt(size_t) const override { throwMustBeDecompressed(); }
    void insert(const Field &) override { throwMustBeDecompressed(); }
    bool tryInsert(const Field &) override { throwMustBeDecompressed(); }
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertRangeFrom(const IColumn &, size_t, size_t) override { throwMustBeDecompressed(); }
#else
    void doInsertRangeFrom(const IColumn &, size_t, size_t) override { throwMustBeDecompressed(); }
@@ -105,7 +105,7 @@ public:
    void expand(const Filter &, bool) override { throwMustBeDecompressed(); }
    ColumnPtr permute(const Permutation &, size_t) const override { throwMustBeDecompressed(); }
    ColumnPtr index(const IColumn &, size_t) const override { throwMustBeDecompressed(); }
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    int compareAt(size_t, size_t, const IColumn &, int) const override { throwMustBeDecompressed(); }
#else
    int doCompareAt(size_t, size_t, const IColumn &, int) const override { throwMustBeDecompressed(); }
@@ -123,7 +123,7 @@ public:
        return data->isNullAt(0);
    }

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertRangeFrom(const IColumn &, size_t /*start*/, size_t length) override
#else
    void doInsertRangeFrom(const IColumn &, size_t /*start*/, size_t length) override
@@ -151,7 +151,7 @@ public:
        ++s;
    }

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertFrom(const IColumn &, size_t) override
#else
    void doInsertFrom(const IColumn &, size_t) override
@@ -160,7 +160,7 @@ public:
        ++s;
    }

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertManyFrom(const IColumn & /*src*/, size_t /* position */, size_t length) override { s += length; }
#else
    void doInsertManyFrom(const IColumn & /*src*/, size_t /* position */, size_t length) override { s += length; }
@@ -237,7 +237,7 @@ public:
        return data->allocatedBytes() + sizeof(s);
    }

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    int compareAt(size_t, size_t, const IColumn & rhs, int nan_direction_hint) const override
#else
    int doCompareAt(size_t, size_t, const IColumn & rhs, int nan_direction_hint) const override
@@ -31,7 +31,7 @@ namespace ErrorCodes
}

template <is_decimal T>
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
int ColumnDecimal<T>::compareAt(size_t n, size_t m, const IColumn & rhs_, int) const
#else
int ColumnDecimal<T>::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int) const
@@ -333,7 +333,7 @@ void ColumnDecimal<T>::insertData(const char * src, size_t /*length*/)
}

template <is_decimal T>
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
void ColumnDecimal<T>::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnDecimal<T>::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
@@ -55,13 +55,13 @@ public:
    void reserve(size_t n) override { data.reserve_exact(n); }
    void shrinkToFit() override { data.shrink_to_fit(); }

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertFrom(const IColumn & src, size_t n) override { data.push_back(static_cast<const Self &>(src).getData()[n]); }
#else
    void doInsertFrom(const IColumn & src, size_t n) override { data.push_back(static_cast<const Self &>(src).getData()[n]); }
#endif

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertManyFrom(const IColumn & src, size_t position, size_t length) override
#else
    void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override
@@ -76,7 +76,7 @@ public:
    void insertManyDefaults(size_t length) override { data.resize_fill(data.size() + length); }
    void insert(const Field & x) override { data.push_back(x.get<T>()); }
    bool tryInsert(const Field & x) override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
    void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
@@ -104,7 +104,7 @@ public:
    void updateHashWithValue(size_t n, SipHash & hash) const override;
    WeakHash32 getWeakHash32() const override;
    void updateHashFast(SipHash & hash) const override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
    int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
#else
    int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
@@ -215,7 +215,7 @@ bool ColumnDynamic::tryInsert(const DB::Field & x)
}


-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
void ColumnDynamic::insertFrom(const DB::IColumn & src_, size_t n)
#else
void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n)
@@ -269,7 +269,7 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n)
    variant_col.insertIntoVariantFrom(string_variant_discr, *tmp_string_column, 0);
}

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
void ColumnDynamic::insertRangeFrom(const DB::IColumn & src_, size_t start, size_t length)
#else
void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, size_t length)
@@ -439,7 +439,7 @@ void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, si
    }
}

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
void ColumnDynamic::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
#else
void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
@@ -603,7 +603,7 @@ void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const
    variant_col.getVariantByGlobalDiscriminator(discr).updateHashWithValue(variant_col.offsetAt(n), hash);
}

-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
int ColumnDynamic::compareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const
#else
int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const
@@ -144,7 +144,7 @@ public:
     void insert(const Field & x) override;
     bool tryInsert(const Field & x) override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src_, size_t n) override;
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
     void insertManyFrom(const IColumn & src, size_t position, size_t length) override;

@@ -221,7 +221,7 @@ public:
         return scattered_columns;
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
 #else
     int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
@@ -74,7 +74,7 @@ bool ColumnFixedString::tryInsert(const Field & x)
     return true;
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnFixedString::insertFrom(const IColumn & src_, size_t index)
 #else
 void ColumnFixedString::doInsertFrom(const IColumn & src_, size_t index)

@@ -90,7 +90,7 @@ void ColumnFixedString::doInsertFrom(const IColumn & src_, size_t index)
     memcpySmallAllowReadWriteOverflow15(chars.data() + old_size, &src.chars[n * index], n);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnFixedString::insertManyFrom(const IColumn & src, size_t position, size_t length)
 #else
 void ColumnFixedString::doInsertManyFrom(const IColumn & src, size_t position, size_t length)

@@ -225,7 +225,7 @@ size_t ColumnFixedString::estimateCardinalityInPermutedRange(const Permutation &
     return elements.size();
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnFixedString::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 #else
 void ColumnFixedString::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
@@ -98,13 +98,13 @@ public:
 
     bool tryInsert(const Field & x) override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src_, size_t index) override;
 #else
     void doInsertFrom(const IColumn & src_, size_t index) override;
 #endif
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
 #else
     void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;

@@ -137,7 +137,7 @@ public:
 
     void updateHashFast(SipHash & hash) const override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t p1, size_t p2, const IColumn & rhs_, int /*nan_direction_hint*/) const override
 #else
     int doCompareAt(size_t p1, size_t p2, const IColumn & rhs_, int /*nan_direction_hint*/) const override

@@ -156,7 +156,7 @@ public:
 
     size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
 #else
     void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
@@ -72,7 +72,7 @@ ColumnPtr ColumnFunction::cut(size_t start, size_t length) const
     return ColumnFunction::create(length, function, capture, is_short_circuit_argument, is_function_compiled);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnFunction::insertFrom(const IColumn & src, size_t n)
 #else
 void ColumnFunction::doInsertFrom(const IColumn & src, size_t n)

@@ -93,7 +93,7 @@ void ColumnFunction::doInsertFrom(const IColumn & src, size_t n)
     ++elements_size;
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnFunction::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 #else
 void ColumnFunction::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
@@ -95,12 +95,12 @@ public:
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot insert into {}", getName());
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src, size_t n) override;
 #else
     void doInsertFrom(const IColumn & src, size_t n) override;
 #endif
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertRangeFrom(const IColumn &, size_t start, size_t length) override;
 #else
     void doInsertRangeFrom(const IColumn &, size_t start, size_t length) override;

@@ -146,7 +146,7 @@ public:
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "popBack is not implemented for {}", getName());
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t, size_t, const IColumn &, int) const override
 #else
     int doCompareAt(size_t, size_t, const IColumn &, int) const override
@@ -158,7 +158,7 @@ void ColumnLowCardinality::insertDefault()
    idx.insertPosition(getDictionary().getDefaultValueIndex());
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnLowCardinality::insertFrom(const IColumn & src, size_t n)
 #else
 void ColumnLowCardinality::doInsertFrom(const IColumn & src, size_t n)

@@ -190,7 +190,7 @@ void ColumnLowCardinality::insertFromFullColumn(const IColumn & src, size_t n)
     idx.insertPosition(getDictionary().uniqueInsertFrom(src, n));
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnLowCardinality::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 #else
 void ColumnLowCardinality::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)

@@ -362,7 +362,7 @@ int ColumnLowCardinality::compareAtImpl(size_t n, size_t m, const IColumn & rhs,
     return getDictionary().compareAt(n_index, m_index, low_cardinality_column.getDictionary(), nan_direction_hint);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 int ColumnLowCardinality::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
 #else
 int ColumnLowCardinality::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
@@ -78,14 +78,14 @@ public:
     bool tryInsert(const Field & x) override;
     void insertDefault() override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src, size_t n) override;
 #else
     void doInsertFrom(const IColumn & src, size_t n) override;
 #endif
     void insertFromFullColumn(const IColumn & src, size_t n);
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
 #else
     void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;

@@ -135,7 +135,7 @@ public:
         return ColumnLowCardinality::create(dictionary.getColumnUniquePtr(), getIndexes().index(indexes_, limit));
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
 #else
     int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
@@ -153,7 +153,7 @@ void ColumnMap::updateHashFast(SipHash & hash) const
     nested->updateHashFast(hash);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnMap::insertFrom(const IColumn & src, size_t n)
 #else
 void ColumnMap::doInsertFrom(const IColumn & src, size_t n)

@@ -162,7 +162,7 @@ void ColumnMap::doInsertFrom(const IColumn & src, size_t n)
     nested->insertFrom(assert_cast<const ColumnMap &>(src).getNestedColumn(), n);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnMap::insertManyFrom(const IColumn & src, size_t position, size_t length)
 #else
 void ColumnMap::doInsertManyFrom(const IColumn & src, size_t position, size_t length)

@@ -171,7 +171,7 @@ void ColumnMap::doInsertManyFrom(const IColumn & src, size_t position, size_t le
     assert_cast<ColumnArray &>(*nested).insertManyFrom(assert_cast<const ColumnMap &>(src).getNestedColumn(), position, length);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnMap::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 #else
 void ColumnMap::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)

@@ -222,7 +222,7 @@ MutableColumns ColumnMap::scatter(ColumnIndex num_columns, const Selector & sele
     return res;
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 int ColumnMap::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
 #else
 int ColumnMap::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
@@ -67,7 +67,7 @@ public:
     WeakHash32 getWeakHash32() const override;
     void updateHashFast(SipHash & hash) const override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src_, size_t n) override;
     void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;

@@ -83,7 +83,7 @@ public:
     ColumnPtr index(const IColumn & indexes, size_t limit) const override;
     ColumnPtr replicate(const Offsets & offsets) const override;
     MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
 #else
     int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
@@ -217,7 +217,7 @@ const char * ColumnNullable::skipSerializedInArena(const char * pos) const
     return pos;
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnNullable::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 #else
 void ColumnNullable::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)

@@ -258,7 +258,7 @@ bool ColumnNullable::tryInsert(const Field & x)
     return true;
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnNullable::insertFrom(const IColumn & src, size_t n)
 #else
 void ColumnNullable::doInsertFrom(const IColumn & src, size_t n)

@@ -270,7 +270,7 @@ void ColumnNullable::doInsertFrom(const IColumn & src, size_t n)
 }
 
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnNullable::insertManyFrom(const IColumn & src, size_t position, size_t length)
 #else
 void ColumnNullable::doInsertManyFrom(const IColumn & src, size_t position, size_t length)

@@ -410,7 +410,7 @@ int ColumnNullable::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int
     return getNestedColumn().compareAt(n, m, nested_rhs, null_direction_hint);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 int ColumnNullable::compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
 #else
 int ColumnNullable::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
@@ -69,7 +69,7 @@ public:
     char * serializeValueIntoMemory(size_t n, char * memory) const override;
     const char * deserializeAndInsertFromArena(const char * pos) override;
     const char * skipSerializedInArena(const char * pos) const override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
 #else
     void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;

@@ -77,7 +77,7 @@ public:
     void insert(const Field & x) override;
     bool tryInsert(const Field & x) override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src, size_t n) override;
     void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
 #else

@@ -100,7 +100,7 @@ public:
     void expand(const Filter & mask, bool inverted) override;
     ColumnPtr permute(const Permutation & perm, size_t limit) const override;
     ColumnPtr index(const IColumn & indexes, size_t limit) const override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
 #else
     int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
@@ -763,7 +763,7 @@ void ColumnObject::get(size_t n, Field & res) const
     }
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnObject::insertFrom(const IColumn & src, size_t n)
 #else
 void ColumnObject::doInsertFrom(const IColumn & src, size_t n)

@@ -772,7 +772,7 @@ void ColumnObject::doInsertFrom(const IColumn & src, size_t n)
     insert(src[n]);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnObject::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 #else
 void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
@@ -211,7 +211,7 @@ public:
     bool tryInsert(const Field & field) override;
     void insertDefault() override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src, size_t n) override;
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
 #else

@@ -236,7 +236,7 @@ public:
     /// Order of rows in ColumnObject is undefined.
     void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const override;
     void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {}
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
 #else
     int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
@@ -174,7 +174,7 @@ const char * ColumnSparse::skipSerializedInArena(const char * pos) const
     return values->skipSerializedInArena(pos);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnSparse::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 #else
 void ColumnSparse::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)

@@ -252,7 +252,7 @@ bool ColumnSparse::tryInsert(const Field & x)
     return true;
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnSparse::insertFrom(const IColumn & src, size_t n)
 #else
 void ColumnSparse::doInsertFrom(const IColumn & src, size_t n)

@@ -454,7 +454,7 @@ ColumnPtr ColumnSparse::indexImpl(const PaddedPODArray<Type> & indexes, size_t l
     return ColumnSparse::create(std::move(res_values), std::move(res_offsets), limit);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 int ColumnSparse::compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
 #else
 int ColumnSparse::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
@@ -81,14 +81,14 @@ public:
     char * serializeValueIntoMemory(size_t n, char * memory) const override;
     const char * deserializeAndInsertFromArena(const char * pos) override;
     const char * skipSerializedInArena(const char *) const override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
 #else
     void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
 #endif
     void insert(const Field & x) override;
     bool tryInsert(const Field & x) override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src, size_t n) override;
 #else
     void doInsertFrom(const IColumn & src, size_t n) override;

@@ -106,7 +106,7 @@ public:
     template <typename Type>
     ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
 #else
     int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
@@ -39,7 +39,7 @@ ColumnString::ColumnString(const ColumnString & src)
             last_offset, chars.size());
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnString::insertManyFrom(const IColumn & src, size_t position, size_t length)
 #else
 void ColumnString::doInsertManyFrom(const IColumn & src, size_t position, size_t length)

@@ -132,7 +132,7 @@ WeakHash32 ColumnString::getWeakHash32() const
 }
 
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnString::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 #else
 void ColumnString::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
@@ -142,7 +142,7 @@ public:
         return true;
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src_, size_t n) override
 #else
     void doInsertFrom(const IColumn & src_, size_t n) override

@@ -169,7 +169,7 @@ public:
         }
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
 #else
     void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;

@@ -220,7 +220,7 @@ public:
         hash.update(reinterpret_cast<const char *>(chars.data()), chars.size() * sizeof(chars[0]));
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
 #else
     void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;

@@ -250,7 +250,7 @@ public:
         offsets.push_back(offsets.back() + 1);
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t n, size_t m, const IColumn & rhs_, int /*nan_direction_hint*/) const override
 #else
     int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int /*nan_direction_hint*/) const override
@@ -206,7 +206,7 @@ bool ColumnTuple::tryInsert(const Field & x)
     return true;
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnTuple::insertFrom(const IColumn & src_, size_t n)
 #else
 void ColumnTuple::doInsertFrom(const IColumn & src_, size_t n)

@@ -223,7 +223,7 @@ void ColumnTuple::doInsertFrom(const IColumn & src_, size_t n)
         columns[i]->insertFrom(*src.columns[i], n);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnTuple::insertManyFrom(const IColumn & src, size_t position, size_t length)
 #else
 void ColumnTuple::doInsertManyFrom(const IColumn & src, size_t position, size_t length)

@@ -327,7 +327,7 @@ void ColumnTuple::updateHashFast(SipHash & hash) const
         column->updateHashFast(hash);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnTuple::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 #else
 void ColumnTuple::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)

@@ -483,7 +483,7 @@ int ColumnTuple::compareAtImpl(size_t n, size_t m, const IColumn & rhs, int nan_
     return 0;
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 int ColumnTuple::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
 #else
 int ColumnTuple::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
@@ -66,7 +66,7 @@ public:
     void insert(const Field & x) override;
     bool tryInsert(const Field & x) override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src_, size_t n) override;
     void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
 #else

@@ -83,7 +83,7 @@ public:
     void updateHashWithValue(size_t n, SipHash & hash) const override;
     WeakHash32 getWeakHash32() const override;
     void updateHashFast(SipHash & hash) const override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
 #else
     void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;

@@ -94,7 +94,7 @@ public:
     ColumnPtr index(const IColumn & indexes, size_t limit) const override;
     ColumnPtr replicate(const Offsets & offsets) const override;
     MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
 #else
     int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
@@ -90,7 +90,7 @@ public:
         return getNestedColumn()->updateHashWithValue(n, hash_func);
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
 #else
     int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;

@@ -492,7 +492,7 @@ const char * ColumnUnique<ColumnType>::skipSerializedInArena(const char *) const
 }
 
 template <typename ColumnType>
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 int ColumnUnique<ColumnType>::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
 #else
 int ColumnUnique<ColumnType>::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
@@ -595,7 +595,7 @@ void ColumnVariant::insertManyFromImpl(const DB::IColumn & src_, size_t position
     }
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnVariant::insertFrom(const IColumn & src_, size_t n)
 #else
 void ColumnVariant::doInsertFrom(const IColumn & src_, size_t n)

@@ -604,7 +604,7 @@ void ColumnVariant::doInsertFrom(const IColumn & src_, size_t n)
     insertFromImpl(src_, n, nullptr);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t length)
 #else
 void ColumnVariant::doInsertRangeFrom(const IColumn & src_, size_t start, size_t length)

@@ -613,7 +613,7 @@ void ColumnVariant::doInsertRangeFrom(const IColumn & src_, size_t start, size_t
     insertRangeFromImpl(src_, start, length, nullptr);
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnVariant::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
 #else
 void ColumnVariant::doInsertManyFrom(const DB::IColumn & src_, size_t position, size_t length)

@@ -1175,7 +1175,7 @@ bool ColumnVariant::hasEqualValues() const
     return local_discriminators->hasEqualValues() && variants[localDiscriminatorAt(0)]->hasEqualValues();
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 int ColumnVariant::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
 #else
 int ColumnVariant::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
@@ -180,7 +180,7 @@ public:
     void insert(const Field & x) override;
     bool tryInsert(const Field & x) override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src_, size_t n) override;
     void insertRangeFrom(const IColumn & src_, size_t start, size_t length) override;
     void insertManyFrom(const IColumn & src_, size_t position, size_t length) override;

@@ -223,7 +223,7 @@ public:
     ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;
     ColumnPtr replicate(const Offsets & replicate_offsets) const override;
     MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
 #else
     int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
@@ -502,7 +502,7 @@ bool ColumnVector<T>::tryInsert(const DB::Field & x)
 }
 
 template <typename T>
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void ColumnVector<T>::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 #else
 void ColumnVector<T>::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
@@ -64,7 +64,7 @@ public:
         return data.size();
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn & src, size_t n) override
 #else
     void doInsertFrom(const IColumn & src, size_t n) override

@@ -73,7 +73,7 @@ public:
         data.push_back(assert_cast<const Self &>(src).getData()[n]);
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertManyFrom(const IColumn & src, size_t position, size_t length) override
 #else
     void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override

@@ -150,7 +150,7 @@ public:
     }
 
     /// This method implemented in header because it could be possibly devirtualized.
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override
 #else
     int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override

@@ -240,7 +240,7 @@ public:
 
     bool tryInsert(const DB::Field & x) override;
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
 #else
     void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
@@ -46,7 +46,7 @@ String IColumn::dumpStructure() const
     return res.str();
 }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 void IColumn::insertFrom(const IColumn & src, size_t n)
 #else
 void IColumn::doInsertFrom(const IColumn & src, size_t n)
@@ -179,7 +179,7 @@ public:
 
     /// Appends n-th element from other column with the same type.
     /// Is used in merge-sort and merges. It could be implemented in inherited classes more optimally than default implementation.
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     virtual void insertFrom(const IColumn & src, size_t n);
 #else
     void insertFrom(const IColumn & src, size_t n)

@@ -191,7 +191,7 @@ public:
 
     /// Appends range of elements from other column with the same type.
     /// Could be used to concatenate columns.
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     virtual void insertRangeFrom(const IColumn & src, size_t start, size_t length) = 0;
 #else
     void insertRangeFrom(const IColumn & src, size_t start, size_t length)

@@ -202,7 +202,7 @@ public:
 #endif
 
     /// Appends one element from other column with the same type multiple times.
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     virtual void insertManyFrom(const IColumn & src, size_t position, size_t length)
     {
         for (size_t i = 0; i < length; ++i)

@@ -345,7 +345,7 @@ public:
       *
      * For non Nullable and non floating point types, nan_direction_hint is ignored.
      */
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     [[nodiscard]] virtual int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const = 0;
 #else
     [[nodiscard]] int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const

@@ -667,7 +667,7 @@ protected:
         Sort full_sort,
         PartialSort partial_sort) const;
 
-#if defined(ABORT_ON_LOGICAL_ERROR)
+#if defined(DEBUG_OR_SANITIZER_BUILD)
     virtual void doInsertFrom(const IColumn & src, size_t n);
 
     virtual void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) = 0;
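The IColumn hunks show the other half of the mechanism: in DEBUG_OR_SANITIZER_BUILD builds the public insertFrom/insertRangeFrom/compareAt entry points become non-virtual wrappers and the virtual work moves to protected doInsertFrom/doInsertRangeFrom/doCompareAt. A self-contained toy model of the dispatch, assuming hypothetical names (the real wrappers also validate the source column's type before dispatching):

    #include <cstddef>

    #define DEBUG_OR_SANITIZER_BUILD 1  // remove this define to model a release build

    struct ToyColumn
    {
        virtual ~ToyColumn() = default;
    #if defined(DEBUG_OR_SANITIZER_BUILD)
        // Public entry point is a non-virtual checking wrapper...
        void insertFrom(const ToyColumn & src, size_t n)
        {
            // ...debug-only validation of `src` and `n` would go here...
            doInsertFrom(src, n);
        }
    protected:
        // ...and the actual work is a protected virtual.
        virtual void doInsertFrom(const ToyColumn & src, size_t n) = 0;
    #else
        virtual void insertFrom(const ToyColumn & src, size_t n) = 0;
    #endif
    };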
@@ -27,7 +27,7 @@ public:
     size_t byteSize() const override { return 0; }
     size_t byteSizeAt(size_t) const override { return 0; }
     size_t allocatedBytes() const override { return 0; }
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
 #else
     int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; }

@@ -73,7 +73,7 @@ public:
     {
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertFrom(const IColumn &, size_t) override
 #else
     void doInsertFrom(const IColumn &, size_t) override

@@ -82,7 +82,7 @@ public:
         ++s;
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertRangeFrom(const IColumn & /*src*/, size_t /*start*/, size_t length) override
 #else
     void doInsertRangeFrom(const IColumn & /*src*/, size_t /*start*/, size_t length) override
@@ -86,7 +86,7 @@ public:
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method tryInsert is not supported for ColumnUnique.");
     }
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
     void insertRangeFrom(const IColumn &, size_t, size_t) override
 #else
     void doInsertRangeFrom(const IColumn &, size_t, size_t) override
@@ -52,7 +52,7 @@ static ColumnPtr mockColumn(const DataTypePtr & type, size_t rows)
 }
 
 
-#if !defined(ABORT_ON_LOGICAL_ERROR)
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
 static NO_INLINE void insertManyFrom(IColumn & dst, const IColumn & src)
 #else
 static NO_INLINE void doInsertManyFrom(IColumn & dst, const IColumn & src)
@@ -206,7 +206,7 @@ void * Allocator<clear_memory_, populate>::realloc(void * buf, size_t old_size,
         }
         else
         {
-            [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(old_size);
+            [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(new_size);
             ProfileEvents::increment(ProfileEvents::GWPAsanAllocateFailed);
         }
     }

@@ -239,7 +239,7 @@ void * Allocator<clear_memory_, populate>::realloc(void * buf, size_t old_size,
     void * new_buf = ::realloc(buf, new_size);
     if (nullptr == new_buf)
     {
-        [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(old_size);
+        [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(new_size);
         throw DB::ErrnoException(
             DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY,
             "Allocator: Cannot realloc from {} to {}",
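The two Allocator hunks fix bookkeeping on the failure path of realloc: the surrounding code has already charged new_size to the memory tracker before calling the system allocator, so when the allocation fails the rollback must release new_size, not old_size, or the tracker drifts by the difference. A hedged sketch of the invariant (illustrative names; the real Allocator code differs in detail):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    // Every amount charged with alloc(N) must be released with free(N) on the
    // failure path; mixing old_size and new_size leaks the delta in the counter.
    void * realloc_sketch(void * buf, size_t old_size, size_t new_size)
    {
        CurrentMemoryTracker::alloc(new_size);        // charge the new size up front
        void * new_buf = ::realloc(buf, new_size);
        if (new_buf == nullptr)
        {
            CurrentMemoryTracker::free(new_size);     // roll back exactly what was charged
            throw std::bad_alloc();
        }
        CurrentMemoryTracker::free(old_size);         // the old allocation is gone
        return new_buf;
    }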
@@ -38,7 +38,7 @@ namespace
         std::erase_if(left_subkeys, [&](const String & key) { return ignore_keys->contains(key); });
         std::erase_if(right_subkeys, [&](const String & key) { return ignore_keys->contains(key); });
 
-#if defined(ABORT_ON_LOGICAL_ERROR)
+#if defined(DEBUG_OR_SANITIZER_BUILD)
         /// Compound `ignore_keys` are not yet implemented.
         for (const auto & ignore_key : *ignore_keys)
            chassert(ignore_key.find('.') == std::string_view::npos);
@@ -234,10 +234,10 @@
     M(PartsCommitted, "Deprecated. See PartsActive.") \
     M(PartsPreActive, "The part is in data_parts, but not used for SELECTs.") \
     M(PartsActive, "Active data part, used by current and upcoming SELECTs.") \
-    M(AttachedDatabase, "Active database, used by current and upcoming SELECTs.") \
-    M(AttachedTable, "Active table, used by current and upcoming SELECTs.") \
-    M(AttachedView, "Active view, used by current and upcoming SELECTs.") \
-    M(AttachedDictionary, "Active dictionary, used by current and upcoming SELECTs.") \
+    M(AttachedDatabase, "Active databases.") \
+    M(AttachedTable, "Active tables.") \
+    M(AttachedView, "Active views.") \
+    M(AttachedDictionary, "Active dictionaries.") \
     M(PartsOutdated, "Not active data part, but could be used by only current SELECTs, could be deleted after SELECTs finishes.") \
     M(PartsDeleting, "Not active data part with identity refcounter, it is deleting right now by a cleaner.") \
     M(PartsDeleteOnDestroy, "Part was moved to another disk and should be deleted in own destructor.") \
@@ -64,7 +64,7 @@ void handle_error_code(const std::string & msg, int code, bool remote, const Exc
 {
     // In debug builds and builds with sanitizers, treat LOGICAL_ERROR as an assertion failure.
     // Log the message before we fail.
-#ifdef ABORT_ON_LOGICAL_ERROR
+#ifdef DEBUG_OR_SANITIZER_BUILD
     if (code == ErrorCodes::LOGICAL_ERROR)
     {
         abortOnFailedAssertion(msg, trace.data(), 0, trace.size());

@@ -443,7 +443,7 @@ PreformattedMessage getCurrentExceptionMessageAndPattern(bool with_stacktrace, b
     }
     catch (...) {} // NOLINT(bugprone-empty-catch)
 
-#ifdef ABORT_ON_LOGICAL_ERROR
+#ifdef DEBUG_OR_SANITIZER_BUILD
     try
     {
         throw;
@@ -192,7 +192,7 @@ void MemoryTracker::debugLogBigAllocationWithoutCheck(Int64 size [[maybe_unused]
 {
     /// Big allocations through allocNoThrow (without checking memory limits) may easily lead to OOM (and it's hard to debug).
     /// Let's find them.
-#ifdef ABORT_ON_LOGICAL_ERROR
+#ifdef DEBUG_OR_SANITIZER_BUILD
     if (size < 0)
         return;
 
@@ -424,7 +424,7 @@ static void logUnexpectedSyscallError(std::string name)
 {
     std::string message = fmt::format("{} failed: {}", name, errnoToString());
     LOG_WARNING(&Poco::Logger::get("PageCache"), "{}", message);
-#if defined(ABORT_ON_LOGICAL_ERROR)
+#if defined(DEBUG_OR_SANITIZER_BUILD)
     volatile bool true_ = true;
     if (true_) // suppress warning about missing [[noreturn]]
         abortOnFailedAssertion(message);
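The `volatile bool true_` above is a small idiom worth spelling out: abortOnFailedAssertion never returns, but calling it unconditionally would let the compiler prove the whole function never returns and warn that it should be declared [[noreturn]]. Reading the condition through a volatile keeps the call formally conditional. A self-contained illustration:

    #include <cstdlib>

    void log_and_abort_sketch()
    {
        volatile bool true_ = true;   // opaque to the optimizer
        if (true_)                    // formally conditional, always taken
            std::abort();
        // This point is "reachable" as far as the compiler can prove,
        // so no missing-[[noreturn]] warning is emitted.
    }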
@@ -23,8 +23,20 @@ namespace DB
 
 LazyPipeFDs TraceSender::pipe;
 
+static thread_local bool inside_send = false;
 void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Extras extras)
 {
+    /** The method shouldn't be called recursively or throw exceptions.
+      * There are several reasons:
+      * - avoid infinite recursion when some of subsequent functions invoke tracing;
+      * - avoid inconsistent writes if the method was interrupted by a signal handler in the middle of writing,
+      *   and then another tracing is invoked (e.g., from query profiler).
+      */
+    if (unlikely(inside_send))
+        return;
+    inside_send = true;
     DENY_ALLOCATIONS_IN_SCOPE;
 
     constexpr size_t buf_size = sizeof(char) /// TraceCollector stop flag
         + sizeof(UInt8) /// String size
        + QUERY_ID_MAX_LEN /// Maximum query_id length

@@ -80,6 +92,8 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Ext
     writePODBinary(extras.increment, out);
 
     out.next();
+
+    inside_send = false;
 }
 
 }
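The new thread_local flag makes TraceSender::send safe against re-entry: if a signal handler (e.g. the query profiler) fires while a send is already writing to the pipe, the nested call returns immediately instead of recursing or interleaving writes. A minimal model of the guard (illustrative names):

    #include <cstdio>

    static thread_local bool inside_send_sketch = false;

    void send_sketch()
    {
        if (inside_send_sketch)
            return;                    // drop the nested call
        inside_send_sketch = true;
        std::puts("writing trace");    // stands in for the pipe write
        inside_send_sketch = false;    // re-arm only after the write finished
    }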
@@ -25,7 +25,7 @@ namespace DB
 template <typename To, typename From>
 inline To assert_cast(From && from)
 {
-#ifdef ABORT_ON_LOGICAL_ERROR
+#ifdef DEBUG_OR_SANITIZER_BUILD
     try
     {
         if constexpr (std::is_pointer_v<To>)
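assert_cast is the checked counterpart of static_cast used throughout the column code above: under DEBUG_OR_SANITIZER_BUILD it verifies the dynamic type before casting; in release builds it is a plain static_cast. A rough, hedged model (the real implementation handles pointers, references and error reporting differently):

    #include <cassert>
    #include <typeinfo>
    #include <type_traits>

    template <typename To, typename From>
    To assert_cast_sketch(From & from)
    {
    #if defined(DEBUG_OR_SANITIZER_BUILD)
        // For polymorphic types, typeid(from) yields the dynamic type.
        assert(typeid(from) == typeid(std::remove_cvref_t<To>));
    #endif
        return static_cast<To>(from);
    }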
@@ -6,12 +6,17 @@ namespace DB
 {
 
 String getRandomASCIIString(size_t length)
 {
+    return getRandomASCIIString(length, thread_local_rng);
+}
+
+String getRandomASCIIString(size_t length, pcg64 & rng)
+{
     std::uniform_int_distribution<int> distribution('a', 'z');
     String res;
     res.resize(length);
     for (auto & c : res)
-        c = distribution(thread_local_rng);
+        c = distribution(rng);
     return res;
 }
 
@@ -2,11 +2,14 @@
 
 #include <Core/Types.h>
 
+#include <pcg_random.hpp>
+
 
 namespace DB
 {
 
 /// Slow random string. Useful for random names and things like this. Not for generating data.
 String getRandomASCIIString(size_t length);
+String getRandomASCIIString(size_t length, pcg64 & rng);
 
 }
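With the new overload a caller can pass its own seeded generator, which makes the produced string reproducible; the one-argument form keeps its old behaviour by forwarding thread_local_rng. Usage sketch (assumes the ClickHouse headers above are included):

    pcg64 rng(42);                                  // fixed seed
    String a = getRandomASCIIString(8, rng);        // deterministic given the seed
    String b = getRandomASCIIString(8);             // process-wide thread_local_rng, as before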
@@ -166,7 +166,7 @@ TEST(Common, RWLockRecursive)
 
         auto lock2 = fifo_lock->getLock(RWLockImpl::Read, "q2");
 
-#ifndef ABORT_ON_LOGICAL_ERROR
+#ifndef DEBUG_OR_SANITIZER_BUILD
         /// It throws LOGICAL_ERROR
         EXPECT_ANY_THROW({fifo_lock->getLock(RWLockImpl::Write, "q2");});
 #endif
@@ -80,13 +80,20 @@ namespace
     /// CREATE TABLE or CREATE DICTIONARY or CREATE VIEW or CREATE TEMPORARY TABLE or CREATE DATABASE query.
     void visitCreateQuery(const ASTCreateQuery & create)
     {
-        QualifiedTableName to_table{create.to_table_id.database_name, create.to_table_id.table_name};
-        if (!to_table.table.empty())
+        if (create.targets)
         {
-            /// TO target_table (for materialized views)
-            if (to_table.database.empty())
-                to_table.database = current_database;
-            dependencies.emplace(to_table);
+            for (const auto & target : create.targets->targets)
+            {
+                const auto & table_id = target.table_id;
+                if (!table_id.table_name.empty())
+                {
+                    /// TO target_table (for materialized views)
+                    QualifiedTableName target_name{table_id.database_name, table_id.table_name};
+                    if (target_name.database.empty())
+                        target_name.database = current_database;
+                    dependencies.emplace(target_name);
+                }
+            }
         }
 
         QualifiedTableName as_table{create.as_database, create.as_table};
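The net effect of this hunk: a CREATE query no longer contributes at most one to_table_id dependency; it can carry several targets (a materialized view's TO table and similar), and each non-empty target table becomes a dependency. Condensed shape of the new traversal, taken from the lines above (default-database handling omitted):

    if (create.targets)
    {
        for (const auto & target : create.targets->targets)
        {
            const auto & table_id = target.table_id;
            if (!table_id.table_name.empty())
                dependencies.emplace(QualifiedTableName{table_id.database_name, table_id.table_name});
        }
    }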
@@ -86,12 +86,19 @@ namespace
         create.as_table = as_table_new.table;
     }
 
-    QualifiedTableName to_table{create.to_table_id.database_name, create.to_table_id.table_name};
-    if (!to_table.table.empty() && !to_table.database.empty())
+    if (create.targets)
     {
-        auto to_table_new = data.renaming_map.getNewTableName(to_table);
-        if (to_table_new != to_table)
-            create.to_table_id = StorageID{to_table_new.database, to_table_new.table};
+        for (auto & target : create.targets->targets)
+        {
+            auto & table_id = target.table_id;
+            if (!table_id.database_name.empty() && !table_id.table_name.empty())
+            {
+                QualifiedTableName target_name{table_id.database_name, table_id.table_name};
+                auto new_target_name = data.renaming_map.getNewTableName(target_name);
+                if (new_target_name != target_name)
+                    table_id = StorageID{new_target_name.database, new_target_name.table};
+            }
+        }
     }
 }
 
@@ -195,7 +195,7 @@ void DatabaseLazy::attachTable(ContextPtr /* context_ */, const String & table_n
         snapshot_detached_tables.erase(table_name);
     }
 
-    CurrentMetrics::add(CurrentMetrics::AttachedTable, 1);
+    CurrentMetrics::add(CurrentMetrics::AttachedTable);
 }
 
 StoragePtr DatabaseLazy::detachTable(ContextPtr /* context */, const String & table_name)

@@ -221,7 +221,7 @@ StoragePtr DatabaseLazy::detachTable(ContextPtr /* context */, const String & ta
             .metadata_path = getObjectMetadataPath(table_name),
             .is_permanently = false});
 
-        CurrentMetrics::sub(CurrentMetrics::AttachedTable, 1);
+        CurrentMetrics::sub(CurrentMetrics::AttachedTable);
     }
     return res;
 }
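These DatabaseLazy changes are purely cosmetic: CurrentMetrics::add and CurrentMetrics::sub default their amount argument to 1, so the explicit `, 1` was redundant. Call shape (assuming the default-argument signatures):

    CurrentMetrics::add(CurrentMetrics::AttachedTable);   // equivalent to add(..., 1)
    CurrentMetrics::sub(CurrentMetrics::AttachedTable);   // equivalent to sub(..., 1)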
@@ -729,81 +729,14 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_
 
     if (auto * create = query->as<ASTCreateQuery>())
     {
-        bool replicated_table = create->storage && create->storage->engine &&
-            (startsWith(create->storage->engine->name, "Replicated") || startsWith(create->storage->engine->name, "Shared"));
-        if (!replicated_table || !create->storage->engine->arguments)
-            return;
+        if (create->storage)
+            checkTableEngine(*create, *create->storage, query_context);
 
-        ASTs & args_ref = create->storage->engine->arguments->children;
-        ASTs args = args_ref;
-        if (args.size() < 2)
-            return;
-
-        /// It can be a constant expression. Try to evaluate it, ignore exception if we cannot.
-        bool has_expression_argument = args_ref[0]->as<ASTFunction>() || args_ref[1]->as<ASTFunction>();
-        if (has_expression_argument)
+        if (create->targets)
         {
-            try
-            {
-                args[0] = evaluateConstantExpressionAsLiteral(args_ref[0]->clone(), query_context);
-                args[1] = evaluateConstantExpressionAsLiteral(args_ref[1]->clone(), query_context);
-            }
-            catch (...) // NOLINT(bugprone-empty-catch)
-            {
-            }
+            for (const auto & inner_table_engine : create->targets->getInnerEngines())
+                checkTableEngine(*create, *inner_table_engine, query_context);
         }
-
-        ASTLiteral * arg1 = args[0]->as<ASTLiteral>();
-        ASTLiteral * arg2 = args[1]->as<ASTLiteral>();
-        if (!arg1 || !arg2 || arg1->value.getType() != Field::Types::String || arg2->value.getType() != Field::Types::String)
-            return;
-
-        String maybe_path = arg1->value.get<String>();
-        String maybe_replica = arg2->value.get<String>();
-
-        /// Looks like it's ReplicatedMergeTree with explicit zookeeper_path and replica_name arguments.
-        /// Let's ensure that some macros are used.
-        /// NOTE: we cannot check here that substituted values will be actually different on shards and replicas.
-
-        Macros::MacroExpansionInfo info;
-        info.table_id = {getDatabaseName(), create->getTable(), create->uuid};
-        info.shard = getShardName();
-        info.replica = getReplicaName();
-        query_context->getMacros()->expand(maybe_path, info);
-        bool maybe_shard_macros = info.expanded_other;
-        info.expanded_other = false;
-        query_context->getMacros()->expand(maybe_replica, info);
-        bool maybe_replica_macros = info.expanded_other;
-        bool enable_functional_tests_helper = getContext()->getConfigRef().has("_functional_tests_helper_database_replicated_replace_args_macros");
-
-        if (!enable_functional_tests_helper)
-        {
-            if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments)
-                LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments");
-            else
-                throw Exception(ErrorCodes::INCORRECT_QUERY,
-                                "It's not allowed to specify explicit zookeeper_path and replica_name "
-                                "for ReplicatedMergeTree arguments in Replicated database. If you really want to "
-                                "specify them explicitly, enable setting "
-                                "database_replicated_allow_replicated_engine_arguments.");
-        }
-
-        if (maybe_shard_macros && maybe_replica_macros)
-            return;
-
-        if (enable_functional_tests_helper && !has_expression_argument)
-        {
-            if (maybe_path.empty() || maybe_path.back() != '/')
-                maybe_path += '/';
-            args_ref[0]->as<ASTLiteral>()->value = maybe_path + "auto_{shard}";
-            args_ref[1]->as<ASTLiteral>()->value = maybe_replica + "auto_{replica}";
-            return;
-        }
-
-        throw Exception(ErrorCodes::INCORRECT_QUERY,
-                        "Explicit zookeeper_path and replica_name are specified in ReplicatedMergeTree arguments. "
-                        "If you really want to specify it explicitly, then you should use some macros "
-                        "to distinguish different shards and replicas");
     }
 }
 
@@ -827,6 +760,85 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_
     }
 }
 
+void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStorage & storage, ContextPtr query_context) const
+{
+    bool replicated_table = storage.engine &&
+        (startsWith(storage.engine->name, "Replicated") || startsWith(storage.engine->name, "Shared"));
+    if (!replicated_table || !storage.engine->arguments)
+        return;
+
+    ASTs & args_ref = storage.engine->arguments->children;
+    ASTs args = args_ref;
+    if (args.size() < 2)
+        return;
+
+    /// It can be a constant expression. Try to evaluate it, ignore exception if we cannot.
+    bool has_expression_argument = args_ref[0]->as<ASTFunction>() || args_ref[1]->as<ASTFunction>();
+    if (has_expression_argument)
+    {
+        try
+        {
+            args[0] = evaluateConstantExpressionAsLiteral(args_ref[0]->clone(), query_context);
+            args[1] = evaluateConstantExpressionAsLiteral(args_ref[1]->clone(), query_context);
+        }
+        catch (...) // NOLINT(bugprone-empty-catch)
+        {
+        }
+    }
+
+    ASTLiteral * arg1 = args[0]->as<ASTLiteral>();
+    ASTLiteral * arg2 = args[1]->as<ASTLiteral>();
+    if (!arg1 || !arg2 || arg1->value.getType() != Field::Types::String || arg2->value.getType() != Field::Types::String)
+        return;
+
+    String maybe_path = arg1->value.get<String>();
+    String maybe_replica = arg2->value.get<String>();
+
+    /// Looks like it's ReplicatedMergeTree with explicit zookeeper_path and replica_name arguments.
+    /// Let's ensure that some macros are used.
+    /// NOTE: we cannot check here that substituted values will be actually different on shards and replicas.
+
+    Macros::MacroExpansionInfo info;
+    info.table_id = {getDatabaseName(), query.getTable(), query.uuid};
+    info.shard = getShardName();
+    info.replica = getReplicaName();
+    query_context->getMacros()->expand(maybe_path, info);
+    bool maybe_shard_macros = info.expanded_other;
+    info.expanded_other = false;
+    query_context->getMacros()->expand(maybe_replica, info);
+    bool maybe_replica_macros = info.expanded_other;
+    bool enable_functional_tests_helper = getContext()->getConfigRef().has("_functional_tests_helper_database_replicated_replace_args_macros");
+
+    if (!enable_functional_tests_helper)
+    {
+        if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments)
+            LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments");
+        else
+            throw Exception(ErrorCodes::INCORRECT_QUERY,
+                            "It's not allowed to specify explicit zookeeper_path and replica_name "
+                            "for ReplicatedMergeTree arguments in Replicated database. If you really want to "
+                            "specify them explicitly, enable setting "
+                            "database_replicated_allow_replicated_engine_arguments.");
+    }
+
+    if (maybe_shard_macros && maybe_replica_macros)
+        return;
+
+    if (enable_functional_tests_helper && !has_expression_argument)
+    {
+        if (maybe_path.empty() || maybe_path.back() != '/')
+            maybe_path += '/';
+        args_ref[0]->as<ASTLiteral>()->value = maybe_path + "auto_{shard}";
+        args_ref[1]->as<ASTLiteral>()->value = maybe_replica + "auto_{replica}";
+        return;
+    }
+
+    throw Exception(ErrorCodes::INCORRECT_QUERY,
+                    "Explicit zookeeper_path and replica_name are specified in ReplicatedMergeTree arguments. "
+                    "If you really want to specify it explicitly, then you should use some macros "
+                    "to distinguish different shards and replicas");
+}
+
 BlockIO DatabaseReplicated::tryEnqueueReplicatedDDL(const ASTPtr & query, ContextPtr query_context, QueryFlags flags)
 {
     waitDatabaseStarted();
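The refactor above is a behaviour-preserving extraction: the ReplicatedMergeTree argument validation that used to live inline in checkQueryValid moves into the new checkTableEngine member function, and checkQueryValid now applies it both to the table's own storage and to every inner engine of the query's targets (for example a materialized view's inner table). Condensed dispatch, taken from the hunk:

    if (create->storage)
        checkTableEngine(*create, *create->storage, query_context);

    if (create->targets)
    {
        for (const auto & inner_table_engine : create->targets->getInnerEngines())
            checkTableEngine(*create, *inner_table_engine, query_context);
    }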
@@ -1312,11 +1324,9 @@ ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node
     if (create.uuid == UUIDHelpers::Nil || create.getTable() != TABLE_WITH_UUID_NAME_PLACEHOLDER || create.database)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Got unexpected query from {}: {}", node_name, query);
 
-    bool is_materialized_view_with_inner_table = create.is_materialized_view && create.to_table_id.empty();
-
     create.setDatabase(getDatabaseName());
     create.setTable(unescapeForFileName(node_name));
-    create.attach = is_materialized_view_with_inner_table;
+    create.attach = create.is_materialized_view_with_inner_table();
 
     return ast;
 }
@@ -107,6 +107,7 @@ private:
     void fillClusterAuthInfo(String collection_name, const Poco::Util::AbstractConfiguration & config);
 
     void checkQueryValid(const ASTPtr & query, ContextPtr query_context) const;
+    void checkTableEngine(const ASTCreateQuery & query, ASTStorage & storage, ContextPtr query_context) const;
 
     void recoverLostReplica(const ZooKeeperPtr & current_zookeeper, UInt32 our_log_ptr, UInt32 & max_log_ptr);
 
@@ -289,8 +289,8 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n
     tables.erase(it);
     table_storage->is_detached = true;
 
-    if (table_storage->isSystemStorage() == false)
-        CurrentMetrics::sub(getAttachedCounterForStorage(table_storage), 1);
+    if (!table_storage->isSystemStorage() && database_name != DatabaseCatalog::SYSTEM_DATABASE)
+        CurrentMetrics::sub(getAttachedCounterForStorage(table_storage));
 
     auto table_id = table_storage->getStorageID();
     if (table_id.hasUUID())

@@ -334,8 +334,8 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c
     /// non-Atomic database the is_detached is set to true before RENAME.
     table->is_detached = false;
 
-    if (table->isSystemStorage() == false && table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE)
-        CurrentMetrics::add(getAttachedCounterForStorage(table), 1);
+    if (!table->isSystemStorage() && table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE)
+        CurrentMetrics::add(getAttachedCounterForStorage(table));
 }
 
 void DatabaseWithOwnTablesBase::shutdown()
@@ -59,7 +59,7 @@ CachedOnDiskReadBufferFromFile::CachedOnDiskReadBufferFromFile(
     std::optional<size_t> read_until_position_,
     std::shared_ptr<FilesystemCacheLog> cache_log_)
     : ReadBufferFromFileBase(use_external_buffer_ ? 0 : settings_.remote_fs_buffer_size, nullptr, 0, file_size_)
-#ifdef ABORT_ON_LOGICAL_ERROR
+#ifdef DEBUG_OR_SANITIZER_BUILD
     , log(getLogger(fmt::format("CachedOnDiskReadBufferFromFile({})", cache_key_)))
 #else
     , log(getLogger("CachedOnDiskReadBufferFromFile"))

@@ -452,7 +452,7 @@ CachedOnDiskReadBufferFromFile::getImplementationBuffer(FileSegment & file_segme
     {
         case ReadType::CACHED:
         {
-#ifdef ABORT_ON_LOGICAL_ERROR
+#ifdef DEBUG_OR_SANITIZER_BUILD
             size_t file_size = getFileSizeFromReadBuffer(*read_buffer_for_file_segment);
             if (file_size == 0 || range.left + file_size <= file_offset_of_buffer_end)
                 throw Exception(

@@ -937,7 +937,7 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep()
 
     if (!result)
     {
-#ifdef ABORT_ON_LOGICAL_ERROR
+#ifdef DEBUG_OR_SANITIZER_BUILD
         if (read_type == ReadType::CACHED)
         {
             size_t cache_file_size = getFileSizeFromReadBuffer(*implementation_buffer);
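In DEBUG_OR_SANITIZER_BUILD builds the buffer's logger name embeds the cache key, so log lines for one cached file can be grepped apart, at the cost of one logger object per key; release builds keep a single shared logger. The selection, as in the first hunk above:

    #ifdef DEBUG_OR_SANITIZER_BUILD
        log = getLogger(fmt::format("CachedOnDiskReadBufferFromFile({})", cache_key_));
    #else
        log = getLogger("CachedOnDiskReadBufferFromFile");
    #endif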
@@ -1954,7 +1954,10 @@ struct ToRelativeSubsecondNumImpl
            return t.value;
        if (scale > scale_multiplier)
            return t.value / (scale / scale_multiplier);
-        return t.value * (scale_multiplier / scale);
+        return static_cast<UInt128>(t.value) * static_cast<UInt128>((scale_multiplier / scale));
+        /// Casting ^^: All integers are Int64, yet if t.value is big enough the multiplication can still
+        /// overflow which is UB. This place is too low-level and generic to check if t.value is sane.
+        /// Therefore just let it overflow safely and don't bother further.
    }
    static Int64 execute(UInt32 t, const DateLUTImpl &)
    {
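The widening cast above avoids signed-overflow UB: an Int64 * Int64 product that overflows is undefined behaviour, while unsigned 128-bit multiplication wraps safely. A standalone illustration, using GCC/Clang's unsigned __int128 as a stand-in for ClickHouse's UInt128 (the concrete values are made up):

#include <cstdint>
#include <cstdio>

int main()
{
    int64_t t = 9'000'000'000'000'000'000;   /// close to INT64_MAX
    int64_t factor = 1'000;                  /// e.g. scale_multiplier / scale

    /// int64_t bad = t * factor;            /// would overflow: undefined behaviour

    unsigned __int128 safe = static_cast<unsigned __int128>(t)
                           * static_cast<unsigned __int128>(factor);

    /// The truncated low 64 bits are what a caller keeping 64-bit storage sees.
    std::printf("low 64 bits: %llu\n", static_cast<unsigned long long>(safe));
}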
@@ -47,54 +47,85 @@ bool allArgumentsAreConstants(const ColumnsWithTypeAndName & args)
    return true;
}

+/// Replaces single low cardinality column in a function call by its dictionary
+/// This can only happen after the arguments have been adapted in IFunctionOverloadResolver::getReturnType
+/// as it's only possible if there is one low cardinality column and, optionally, const columns
ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
    ColumnsWithTypeAndName & args, bool can_be_executed_on_default_arguments, size_t input_rows_count)
{
-    size_t num_rows = input_rows_count;
+    /// We return the LC indexes so the LC can be reconstructed with the function result
    ColumnPtr indexes;

-    /// Find first LowCardinality column and replace it to nested dictionary.
-    for (auto & column : args)
+    size_t number_low_cardinality_columns = 0;
+    size_t last_low_cardinality = 0;
+    size_t number_const_columns = 0;
+    size_t number_full_columns = 0;
+
+    for (size_t i = 0; i < args.size(); i++)
    {
-        if (const auto * low_cardinality_column = checkAndGetColumn<ColumnLowCardinality>(column.column.get()))
+        auto const & arg = args[i];
+        if (checkAndGetColumn<ColumnLowCardinality>(arg.column.get()))
        {
-            /// Single LowCardinality column is supported now.
-            if (indexes)
-                throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single dictionary argument for function.");
-
-            const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(column.type.get());
-
-            if (!low_cardinality_type)
-                throw Exception(ErrorCodes::LOGICAL_ERROR,
-                    "Incompatible type for LowCardinality column: {}",
-                    column.type->getName());
-
-            if (can_be_executed_on_default_arguments)
-            {
-                /// Normal case, when function can be executed on values' default.
-                column.column = low_cardinality_column->getDictionary().getNestedColumn();
-                indexes = low_cardinality_column->getIndexesPtr();
-            }
-            else
-            {
-                /// Special case when default value can't be used. Example: 1 % LowCardinality(Int).
-                /// LowCardinality always contains default, so 1 % 0 will throw exception in normal case.
-                auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size());
-                column.column = dict_encoded.dictionary;
-                indexes = dict_encoded.indexes;
-            }
-
-            num_rows = column.column->size();
-            column.type = low_cardinality_type->getDictionaryType();
+            number_low_cardinality_columns++;
+            last_low_cardinality = i;
        }
+        else if (checkAndGetColumn<ColumnConst>(arg.column.get()))
+            number_const_columns++;
+        else
+            number_full_columns++;
    }

-    /// Change size of constants.
+    if (!number_low_cardinality_columns && !number_const_columns)
+        return nullptr;
+
+    if (number_full_columns > 0 || number_low_cardinality_columns > 1)
+    {
+        /// This should not be possible but currently there are multiple tests in CI failing because of it
+        /// TODO: Fix those cases, then enable this exception
+#if 0
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected low cardinality types found. Low cardinality: {}. Full {}. Const {}",
+            number_low_cardinality_columns, number_full_columns, number_const_columns);
+#else
+        return nullptr;
+#endif
+    }
+    else if (number_low_cardinality_columns == 1)
+    {
+        auto & lc_arg = args[last_low_cardinality];
+
+        const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(lc_arg.type.get());
+        if (!low_cardinality_type)
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Incompatible type for LowCardinality column: {}", lc_arg.type->getName());
+
+        const auto * low_cardinality_column = checkAndGetColumn<ColumnLowCardinality>(lc_arg.column.get());
+        chassert(low_cardinality_column);
+
+        if (can_be_executed_on_default_arguments)
+        {
+            /// Normal case, when function can be executed on values' default.
+            lc_arg.column = low_cardinality_column->getDictionary().getNestedColumn();
+            indexes = low_cardinality_column->getIndexesPtr();
+        }
+        else
+        {
+            /// Special case when default value can't be used. Example: 1 % LowCardinality(Int).
+            /// LowCardinality always contains default, so 1 % 0 will throw exception in normal case.
+            auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size());
+            lc_arg.column = dict_encoded.dictionary;
+            indexes = dict_encoded.indexes;
+        }
+
+        /// The new column will have a different number of rows, normally less but occasionally it might be more (NULL)
+        input_rows_count = lc_arg.column->size();
+        lc_arg.type = low_cardinality_type->getDictionaryType();
+    }
+
+    /// Change size of constants
    for (auto & column : args)
    {
        if (const auto * column_const = checkAndGetColumn<ColumnConst>(column.column.get()))
        {
-            column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), num_rows);
+            column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), input_rows_count);
            column.type = recursiveRemoveLowCardinality(column.type);
        }
    }
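For context on what this function enables: a LowCardinality column stores a small dictionary plus per-row indexes, so a function can run once per distinct value and the full result is rebuilt through the indexes (which is what the returned indexes column is for). A toy sketch of that idea with plain std::vector stand-ins, not ClickHouse's ColumnLowCardinality:

#include <cstdio>
#include <string>
#include <vector>

int main()
{
    /// Dictionary of distinct values plus one index per row.
    std::vector<std::string> dictionary = {"", "red", "green"};
    std::vector<size_t> indexes = {1, 2, 1, 1, 2, 0};   /// 6 rows, 3 distinct values

    /// Execute the function (here: length of the string) on the dictionary only...
    std::vector<size_t> result_on_dictionary(dictionary.size());
    for (size_t i = 0; i < dictionary.size(); ++i)
        result_on_dictionary[i] = dictionary[i].size();

    /// ...then reconstruct the per-row result through the indexes.
    for (size_t row = 0; row < indexes.size(); ++row)
        std::printf("row %zu: length = %zu\n", row, result_on_dictionary[indexes[row]]);
}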
@@ -270,6 +301,8 @@ ColumnPtr IExecutableFunction::executeWithoutSparseColumns(const ColumnsWithType
    bool can_be_executed_on_default_arguments = canBeExecutedOnDefaultArguments();

    const auto & dictionary_type = res_low_cardinality_type->getDictionaryType();
+    /// The arguments should have been adapted in IFunctionOverloadResolver::getReturnType
+    /// So there is only one low cardinality column (and optionally some const columns) and no full column
    ColumnPtr indexes = replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
        columns_without_low_cardinality, can_be_executed_on_default_arguments, input_rows_count);

@@ -5,11 +5,12 @@ namespace DB
{
namespace
{
-    struct AcoshName
-    {
-        static constexpr auto name = "acosh";
-    };
-    using FunctionAcosh = FunctionMathUnary<UnaryFunctionVectorized<AcoshName, acosh>>;
+struct AcoshName
+{
+    static constexpr auto name = "acosh";
+};
+using FunctionAcosh = FunctionMathUnary<UnaryFunctionVectorized<AcoshName, acosh>>;
+
}
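Each of these one-function math sources (and the asinh/atan2/atanh/cosh files below) defines only a name tag and instantiates a shared template. A self-contained sketch of the idiom, where MathUnary and acosh_wrap are hypothetical stand-ins for FunctionMathUnary / UnaryFunctionVectorized:

#include <cmath>
#include <cstdio>

/// One shared template carries the implementation...
template <typename Name, double (*Op)(double)>
struct MathUnary
{
    static constexpr auto name = Name::name;
    static double apply(double x) { return Op(x); }
};

double acosh_wrap(double x) { return std::acosh(x); }

/// ...and each function file only supplies a name and an operation.
struct AcoshName { static constexpr auto name = "acosh"; };
using FunctionAcosh = MathUnary<AcoshName, acosh_wrap>;

int main()
{
    std::printf("%s(1.5) = %f\n", FunctionAcosh::name, FunctionAcosh::apply(1.5));
}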
@@ -6,6 +6,7 @@ namespace DB
{

using FunctionAddMicroseconds = FunctionDateOrDateTimeAddInterval<AddMicrosecondsImpl>;

REGISTER_FUNCTION(AddMicroseconds)
{
    factory.registerFunction<FunctionAddMicroseconds>();
@@ -6,6 +6,7 @@ namespace DB
{

using FunctionAddMilliseconds = FunctionDateOrDateTimeAddInterval<AddMillisecondsImpl>;

REGISTER_FUNCTION(AddMilliseconds)
{
    factory.registerFunction<FunctionAddMilliseconds>();
@@ -6,6 +6,7 @@ namespace DB
{

using FunctionAddNanoseconds = FunctionDateOrDateTimeAddInterval<AddNanosecondsImpl>;

REGISTER_FUNCTION(AddNanoseconds)
{
    factory.registerFunction<FunctionAddNanoseconds>();
@@ -7,7 +7,6 @@

namespace DB
{

namespace
{

@@ -57,7 +57,7 @@ private:
    bool useDefaultImplementationForConstants() const override { return true; }
    ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }

-    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        const auto & column = arguments[0].column;
        const auto & column_char = arguments[1].column;
@@ -80,14 +80,13 @@ private:
        auto & dst_data = col_res->getChars();
        auto & dst_offsets = col_res->getOffsets();

-        const auto size = src_offsets.size();
-        dst_data.resize(src_data.size() + size);
-        dst_offsets.resize(size);
+        dst_data.resize(src_data.size() + input_rows_count);
+        dst_offsets.resize(input_rows_count);

        ColumnString::Offset src_offset{};
        ColumnString::Offset dst_offset{};

-        for (const auto i : collections::range(0, size))
+        for (size_t i = 0; i < input_rows_count; ++i)
        {
            const auto src_length = src_offsets[i] - src_offset;
            memcpySmallAllowReadWriteOverflow15(&dst_data[dst_offset], &src_data[src_offset], src_length);
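The rewrite above iterates rows via the input_rows_count passed to executeImpl instead of re-deriving the count from the offsets array. A toy model of the offsets layout these string loops walk (simplified stand-in for ClickHouse's ColumnString, which concatenates all rows into one char buffer, zero-terminates each row, and stores one past-the-end offset per row):

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<char> chars = {'h', 'i', '\0', 'y', 'o', 'u', '\0'};
    std::vector<uint64_t> offsets = {3, 7};          /// end of each row, exclusive
    size_t input_rows_count = offsets.size();

    uint64_t prev_offset = 0;
    for (size_t i = 0; i < input_rows_count; ++i)    /// same loop shape as the hunk
    {
        uint64_t length = offsets[i] - prev_offset - 1;   /// minus the trailing zero byte
        std::printf("row %zu: '%.*s' (length %llu)\n",
                    i, static_cast<int>(length), chars.data() + prev_offset,
                    static_cast<unsigned long long>(length));
        prev_offset = offsets[i];
    }
}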
@@ -45,9 +45,7 @@ struct AsciiImpl
        size_t size = data.size() / n;

        for (size_t i = 0; i < size; ++i)
-        {
            res[i] = doAscii(data, i * n, n);
-        }
    }

    [[noreturn]] static void array(const ColumnString::Offsets & /*offsets*/, PaddedPODArray<ReturnType> & /*res*/)
@@ -5,11 +5,12 @@ namespace DB
{
namespace
{
-    struct AsinhName
-    {
-        static constexpr auto name = "asinh";
-    };
-    using FunctionAsinh = FunctionMathUnary<UnaryFunctionVectorized<AsinhName, asinh>>;
+struct AsinhName
+{
+    static constexpr auto name = "asinh";
+};
+using FunctionAsinh = FunctionMathUnary<UnaryFunctionVectorized<AsinhName, asinh>>;
+
}
@@ -5,11 +5,12 @@ namespace DB
{
namespace
{
-    struct Atan2Name
-    {
-        static constexpr auto name = "atan2";
-    };
-    using FunctionAtan2 = FunctionMathBinaryFloat64<BinaryFunctionVectorized<Atan2Name, atan2>>;
+struct Atan2Name
+{
+    static constexpr auto name = "atan2";
+};
+using FunctionAtan2 = FunctionMathBinaryFloat64<BinaryFunctionVectorized<Atan2Name, atan2>>;
+
}
@@ -5,11 +5,12 @@ namespace DB
{
namespace
{
-    struct AtanhName
-    {
-        static constexpr auto name = "atanh";
-    };
-    using FunctionAtanh = FunctionMathUnary<UnaryFunctionVectorized<AtanhName, atanh>>;
+struct AtanhName
+{
+    static constexpr auto name = "atanh";
+};
+using FunctionAtanh = FunctionMathUnary<UnaryFunctionVectorized<AtanhName, atanh>>;
+
}
@@ -3,8 +3,10 @@

namespace DB
{

REGISTER_FUNCTION(Base58Encode)
{
    factory.registerFunction<FunctionBase58Conversion<Base58Encode>>();
}

}
@@ -5,6 +5,7 @@

namespace DB
{

REGISTER_FUNCTION(Base64Decode)
{
+    FunctionDocumentation::Description description = R"(Accepts a String and decodes it from base64, according to RFC 4648 (https://datatracker.ietf.org/doc/html/rfc4648#section-4). Throws an exception in case of an error. Alias: FROM_BASE64.)";
@@ -19,6 +20,7 @@ REGISTER_FUNCTION(Base64Decode)
    /// MySQL compatibility alias.
    factory.registerAlias("FROM_BASE64", "base64Decode", FunctionFactory::Case::Insensitive);
}

}

#endif
@@ -5,6 +5,7 @@

namespace DB
{

REGISTER_FUNCTION(Base64Encode)
{
+    FunctionDocumentation::Description description = R"(Encodes a String as base64, according to RFC 4648 (https://datatracker.ietf.org/doc/html/rfc4648#section-4). Alias: TO_BASE64.)";
@@ -19,6 +20,7 @@ REGISTER_FUNCTION(Base64Encode)
    /// MySQL compatibility alias.
    factory.registerAlias("TO_BASE64", "base64Encode", FunctionFactory::Case::Insensitive);
}

}

#endif
@@ -5,6 +5,7 @@

namespace DB
{

REGISTER_FUNCTION(Base64URLDecode)
{
+    FunctionDocumentation::Description description = R"(Accepts a base64-encoded URL and decodes it from base64 with URL-specific modifications, according to RFC 4648 (https://datatracker.ietf.org/doc/html/rfc4648#section-5).)";
@@ -16,6 +17,7 @@ REGISTER_FUNCTION(Base64URLDecode)

+    factory.registerFunction<FunctionBase64Conversion<Base64Decode<Base64Variant::URL>>>({description, syntax, arguments, returned_value, examples, categories});
}

}

#endif
@@ -5,6 +5,7 @@

namespace DB
{

REGISTER_FUNCTION(Base64URLEncode)
{
+    FunctionDocumentation::Description description = R"(Encodes an URL (String or FixedString) as base64 with URL-specific modifications, according to RFC 4648 (https://datatracker.ietf.org/doc/html/rfc4648#section-5).)";
@@ -16,6 +17,7 @@ REGISTER_FUNCTION(Base64URLEncode)

+    factory.registerFunction<FunctionBase64Conversion<Base64Encode<Base64Variant::URL>>>({description, syntax, arguments, returned_value, examples, categories});
}

}

#endif
@@ -67,11 +67,11 @@ public:
        const IColumn * column = arguments[arg_num].column.get();

        if (arg_num == 0)
-            for (size_t row_num = 0; row_num < input_rows_count; ++row_num)
-                vec_res[row_num] = column->byteSizeAt(row_num);
+            for (size_t row = 0; row < input_rows_count; ++row)
+                vec_res[row] = column->byteSizeAt(row);
        else
-            for (size_t row_num = 0; row_num < input_rows_count; ++row_num)
-                vec_res[row_num] += column->byteSizeAt(row_num);
+            for (size_t row = 0; row < input_rows_count; ++row)
+                vec_res[row] += column->byteSizeAt(row);
    }

    return result_col;
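A standalone sketch of what these loops compute: the first argument initializes each row's byte total and every further argument adds to it (byteSizeAt here is a toy stand-in for IColumn::byteSizeAt):

#include <cstdio>
#include <string>
#include <vector>

/// Toy per-row byte size: payload plus some fixed bookkeeping overhead.
size_t byteSizeAt(const std::string & value)
{
    return value.size() + sizeof(size_t);
}

int main()
{
    std::vector<std::vector<std::string>> argument_columns = {
        {"a", "bb", "ccc"},   /// argument 0
        {"xx", "yy", "zz"},   /// argument 1
    };
    const size_t input_rows_count = 3;
    std::vector<size_t> vec_res(input_rows_count);

    for (size_t arg_num = 0; arg_num < argument_columns.size(); ++arg_num)
    {
        if (arg_num == 0)
            for (size_t row = 0; row < input_rows_count; ++row)
                vec_res[row] = byteSizeAt(argument_columns[arg_num][row]);
        else
            for (size_t row = 0; row < input_rows_count; ++row)
                vec_res[row] += byteSizeAt(argument_columns[arg_num][row]);
    }

    for (size_t row = 0; row < input_rows_count; ++row)
        std::printf("row %zu: %zu bytes\n", row, vec_res[row]);
}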
@@ -10,6 +10,7 @@ extern const int NOT_IMPLEMENTED;

namespace
{

template <typename T>
requires std::is_integral_v<T>
T byteSwap(T x)
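A minimal self-contained version of such a constrained byte-swap template (generic toy, not ClickHouse's implementation): it reverses the object representation of any integral value.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <type_traits>
#include <utility>

template <typename T>
requires std::is_integral_v<T>
T byteSwap(T x)
{
    unsigned char bytes[sizeof(T)];
    std::memcpy(bytes, &x, sizeof(T));              /// copy out the representation
    for (size_t i = 0; i < sizeof(T) / 2; ++i)
        std::swap(bytes[i], bytes[sizeof(T) - 1 - i]);
    T result;
    std::memcpy(&result, bytes, sizeof(T));         /// copy it back, reversed
    return result;
}

int main()
{
    uint32_t x = 0x11223344;
    std::printf("0x%08X -> 0x%08X\n",
                static_cast<unsigned>(x),
                static_cast<unsigned>(byteSwap(x)));   /// prints 0x44332211
}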
@@ -98,8 +98,7 @@ public:

        /// Execute transform.
        ColumnsWithTypeAndName transform_args{args.front(), src_array_col, dst_array_col, args.back()};
-        return FunctionFactory::instance().get("transform", context)->build(transform_args)
-            ->execute(transform_args, result_type, input_rows_count);
+        return FunctionFactory::instance().get("transform", context)->build(transform_args)->execute(transform_args, result_type, input_rows_count);
    }

private:
@@ -88,7 +88,8 @@ private:

    static void convert(const String & from_charset, const String & to_charset,
        const ColumnString::Chars & from_chars, const ColumnString::Offsets & from_offsets,
-        ColumnString::Chars & to_chars, ColumnString::Offsets & to_offsets)
+        ColumnString::Chars & to_chars, ColumnString::Offsets & to_offsets,
+        size_t input_rows_count)
    {
        auto converter_from = getConverter(from_charset);
        auto converter_to = getConverter(to_charset);
@@ -96,12 +97,11 @@ private:
        ColumnString::Offset current_from_offset = 0;
        ColumnString::Offset current_to_offset = 0;

-        size_t size = from_offsets.size();
-        to_offsets.resize(size);
+        to_offsets.resize(input_rows_count);

        PODArray<UChar> uchars;

-        for (size_t i = 0; i < size; ++i)
+        for (size_t i = 0; i < input_rows_count; ++i)
        {
            size_t from_string_size = from_offsets[i] - current_from_offset - 1;

@@ -184,7 +184,7 @@ public:
    bool useDefaultImplementationForConstants() const override { return true; }
    ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; }

-    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        const ColumnWithTypeAndName & arg_from = arguments[0];
        const ColumnWithTypeAndName & arg_charset_from = arguments[1];
@@ -204,7 +204,7 @@ public:
        if (const ColumnString * col_from = checkAndGetColumn<ColumnString>(arg_from.column.get()))
        {
            auto col_to = ColumnString::create();
-            convert(charset_from, charset_to, col_from->getChars(), col_from->getOffsets(), col_to->getChars(), col_to->getOffsets());
+            convert(charset_from, charset_to, col_from->getChars(), col_from->getOffsets(), col_to->getChars(), col_to->getOffsets(), input_rows_count);
            return col_to;
        }
        else
@@ -5,11 +5,12 @@ namespace DB
{
namespace
{
-    struct CoshName
-    {
-        static constexpr auto name = "cosh";
-    };
-    using FunctionCosh = FunctionMathUnary<UnaryFunctionVectorized<CoshName, cosh>>;
+struct CoshName
+{
+    static constexpr auto name = "cosh";
+};
+using FunctionCosh = FunctionMathUnary<UnaryFunctionVectorized<CoshName, cosh>>;
+
}
@@ -13,8 +13,7 @@ struct NameCountSubstringsCaseInsensitiveUTF8
    static constexpr auto name = "countSubstringsCaseInsensitiveUTF8";
};

-using FunctionCountSubstringsCaseInsensitiveUTF8 = FunctionsStringSearch<
-    CountSubstringsImpl<NameCountSubstringsCaseInsensitiveUTF8, PositionCaseInsensitiveUTF8>>;
+using FunctionCountSubstringsCaseInsensitiveUTF8 = FunctionsStringSearch<CountSubstringsImpl<NameCountSubstringsCaseInsensitiveUTF8, PositionCaseInsensitiveUTF8>>;

}