Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)

Merge remote-tracking branch 'ClickHouse/master' into bump-icu

Commit 2a68867a20

.github/actions/release/action.yml (vendored, new file, 173 lines)
@@ -0,0 +1,173 @@
name: Release

description: Makes patch releases and creates new release branch

inputs:
  ref:
    description: 'Git reference (branch or commit sha) from which to create the release'
    required: true
    type: string
  type:
    description: 'The type of release: "new" for a new release or "patch" for a patch release'
    required: true
    type: choice
    options:
      - patch
      - new
  dry-run:
    description: 'Dry run'
    required: false
    default: true
    type: boolean
  token:
    required: true
    type: string

runs:
  using: "composite"
  steps:
    - name: Prepare Release Info
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --prepare-release-info \
          --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
          ${{ inputs.dry-run && '--dry-run' || '' }}
        echo "::group::Release Info"
        python3 -m json.tool /tmp/release_info.json
        echo "::endgroup::"
        release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
        commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
        echo "Release Tag: $release_tag"
        echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
        echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
    - name: Download All Release Artifacts
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push Git Tag for the Release
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push New Release Branch
      if: ${{ inputs.type == 'new' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump CH Version and Update Contributors' List
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump Docker versions, Changelog, Security
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-started --progress "update ChangeLog"
        [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
        echo "List versions"
        ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
        echo "Update docker version"
        ./utils/list-versions/update-docker-version.sh
        echo "Generate ChangeLog"
        export CI=1
        docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
          --volume=".:/ClickHouse" clickhouse/style-test \
          /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
          --gh-user-or-token=${{ inputs.token }} --jobs=5 \
          --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
        git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
        echo "Generate Security"
        python3 ./utils/security-generator/generate_security.py > SECURITY.md
        git diff HEAD
    - name: Create ChangeLog PR
      if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
      uses: peter-evans/create-pull-request@v6
      with:
        author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        token: ${{ inputs.token }}
        committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
        branch: auto/${{ env.RELEASE_TAG }}
        assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
        delete-branch: true
        title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
        labels: do not test
        body: |
          Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
          ### Changelog category (leave one):
          - Not for changelog (changelog entry is not required)
    - name: Reset changes if Dry-run
      if: ${{ inputs.dry-run }}
      shell: bash
      run: |
        git reset --hard HEAD
    - name: Checkout back to GITHUB_REF
      shell: bash
      run: |
        git checkout "$GITHUB_REF_NAME"
        # set current progress to OK
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Create GH Release
      shell: bash
      if: ${{ inputs.type == 'patch' }}
      run: |
        python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Docker clickhouse/clickhouse-server building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-started --progress "docker server release"
        cd "./tests/ci"
        export CHECK_NAME="Docker server image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Docker clickhouse/clickhouse-keeper building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-started --progress "docker keeper release"
        cd "./tests/ci"
        export CHECK_NAME="Docker keeper image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Set Release progress completed
      shell: bash
      run: |
        # If we are here, set the completed status to post a proper Slack OK or FAIL message in the next step
        python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Post Slack Message
      if: ${{ !cancelled() }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }}
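For reference, a composite action like this is consumed from a workflow job via `uses:` pointing at its in-repo path, with the inputs above passed under `with:`; the auto_release.yml and create_release.yml changes below call it exactly this way. A minimal sketch (the job name and the `ref` value are illustrative; the rest mirrors the workflows in this commit):

```yaml
jobs:
  Release:
    runs-on: [self-hosted, release-maker]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          fetch-depth: 0  # full history, since the action pushes tags and branches
      - name: Call Release Action
        uses: ./.github/actions/release  # resolved from the checked-out working tree
        with:
          ref: master   # illustrative: a branch or commit sha
          type: patch   # one of: patch, new
          dry-run: true
          token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
```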
.github/workflows/auto_release.yml (vendored, 98 lines changed)
@@ -1,44 +1,110 @@
name: AutoRelease

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  DRY_RUN: true

concurrency:
  group: auto-release
  group: release
on:  # yamllint disable-line rule:truthy
  # schedule:
  #   - cron: '0 10-16 * * 1-5'
  # Workflow uses a test bucket for packages and dry run mode (no real releases)
  schedule:
    - cron: '0 9 * * *'
    - cron: '0 15 * * *'
  workflow_dispatch:
    inputs:
      dry-run:
        description: 'Dry run'
        required: false
        default: true
        type: boolean

jobs:
  CherryPick:
    runs-on: [self-hosted, style-checker-aarch64]
  AutoRelease:
    runs-on: [self-hosted, release-maker]
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/cherry_pick
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          REPO_OWNER=ClickHouse
          REPO_NAME=ClickHouse
          REPO_TEAM=core
          EOF
      - name: Set DRY_RUN for schedule
        if: ${{ github.event_name == 'schedule' }}
        run: echo "DRY_RUN=true" >> "$GITHUB_ENV"
      - name: Set DRY_RUN for dispatch
        if: ${{ github.event_name == 'workflow_dispatch' }}
        run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV"
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Auto-release
      - name: Auto Release Prepare
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --release-after-days=3
      - name: Cleanup
        if: always()
          python3 auto_release.py --prepare
          echo "::group::Auto Release Info"
          python3 -m json.tool /tmp/autorelease_info.json
          echo "::endgroup::"
          {
            echo 'AUTO_RELEASE_PARAMS<<EOF'
            cat /tmp/autorelease_info.json
            echo 'EOF'
          } >> "$GITHUB_ENV"
      - name: Post Release Branch statuses
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --post-status
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }}
        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }}
        uses: ./.github/actions/release
        with:
          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }}
          type: patch
          dry-run: ${{ env.DRY_RUN }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
      - name: Post Slack Message
        if: ${{ !cancelled() }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
      - name: Clean up
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
.github/workflows/create_release.yml (vendored, 134 lines changed)
@@ -2,7 +2,6 @@ name: CreateRelease

concurrency:
  group: release

'on':
  workflow_dispatch:
    inputs:
@@ -31,136 +30,15 @@ jobs:
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          RELEASE_INFO_FILE=${{ runner.temp }}/release_info.json
          EOF
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Prepare Release Info
        run: |
          python3 ./tests/ci/create_release.py --prepare-release-info \
            --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
            --outfile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
          echo "::group::Release Info"
          python3 -m json.tool "$RELEASE_INFO_FILE"
          echo "::endgroup::"
          release_tag=$(jq -r '.release_tag' "$RELEASE_INFO_FILE")
          commit_sha=$(jq -r '.commit_sha' "$RELEASE_INFO_FILE")
          echo "Release Tag: $release_tag"
          echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
          echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
      - name: Download All Release Artifacts
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Push Git Tag for the Release
        run: |
          python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Push New Release Branch
        if: ${{ inputs.type == 'new' }}
        run: |
          python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Bump CH Version and Update Contributors' List
        run: |
          python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Checkout master
        run: |
          git checkout master
      - name: Bump Docker versions, Changelog, Security
        if: ${{ inputs.type == 'patch' }}
        run: |
          [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
          echo "List versions"
          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
          echo "Update docker version"
          ./utils/list-versions/update-docker-version.sh
          echo "Generate ChangeLog"
          export CI=1
          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
            --volume=".:/ClickHouse" clickhouse/style-test \
            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
            --gh-user-or-token="$GH_TOKEN" --jobs=5 \
            --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
          git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
          echo "Generate Security"
          python3 ./utils/security-generator/generate_security.py > SECURITY.md
          git diff HEAD
      - name: Create ChangeLog PR
        if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
        uses: peter-evans/create-pull-request@v6
      - name: Call Release Action
        uses: ./.github/actions/release
        with:
          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
          token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
          commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
          branch: auto/${{ env.RELEASE_TAG }}
          assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
          delete-branch: true
          title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
          labels: do not test
          body: |
            Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
            ### Changelog category (leave one):
            - Not for changelog (changelog entry is not required)
      - name: Reset changes if Dry-run
        if: ${{ inputs.dry-run }}
        run: |
          git reset --hard HEAD
      - name: Checkout back to GITHUB_REF
        run: |
          git checkout "$GITHUB_REF_NAME"
      - name: Create GH Release
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/create_release.py --create-gh-release \
            --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}

      - name: Export TGZ Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Test TGZ Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Export RPM Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Test RPM Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Export Debian Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Test Debian Packages
        if: ${{ inputs.type == 'patch' }}
        run: |
          python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
      - name: Docker clickhouse/clickhouse-server building
        if: ${{ inputs.type == 'patch' }}
        run: |
          cd "./tests/ci"
          export CHECK_NAME="Docker server image"
          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
      - name: Docker clickhouse/clickhouse-keeper building
        if: ${{ inputs.type == 'patch' }}
        run: |
          cd "./tests/ci"
          export CHECK_NAME="Docker keeper image"
          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
      - name: Post Slack Message
        if: always()
        run: |
          echo Slack Message
          ref: ${{ inputs.ref }}
          type: ${{ inputs.type }}
          dry-run: ${{ inputs.dry-run }}
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
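The `inputs:` block elided between the two hunks above is what `${{ inputs.ref }}`, `${{ inputs.type }}` and `${{ inputs.dry-run }}` refer to; judging from the composite action introduced earlier in this commit, it plausibly mirrors the same three declarations. A sketch, not the verbatim file:

```yaml
'on':
  workflow_dispatch:
    inputs:
      ref:
        description: 'Git reference (branch or commit sha) from which to create the release'
        required: true
        type: string
      type:
        description: 'The type of release: "new" for a new release or "patch" for a patch release'
        required: true
        type: choice
        options:
          - patch
          - new
      dry-run:
        description: 'Dry run'
        required: false
        default: true
        type: boolean
```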
@@ -1298,7 +1298,6 @@ elseif(ARCH_PPC64LE)
    ${OPENSSL_SOURCE_DIR}/crypto/camellia/camellia.c
    ${OPENSSL_SOURCE_DIR}/crypto/camellia/cmll_cbc.c
    ${OPENSSL_SOURCE_DIR}/crypto/chacha/chacha_enc.c
    ${OPENSSL_SOURCE_DIR}/crypto/mem_clr.c
    ${OPENSSL_SOURCE_DIR}/crypto/rc4/rc4_enc.c
    ${OPENSSL_SOURCE_DIR}/crypto/rc4/rc4_skey.c
    ${OPENSSL_SOURCE_DIR}/crypto/sha/keccak1600.c
@@ -4,6 +4,9 @@
source /setup_export_logs.sh
set -e -x

MAX_RUN_TIME=${MAX_RUN_TIME:-3600}
MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 3600 : MAX_RUN_TIME))

# Choose random timezone for this test run
TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
echo "Chosen random timezone $TZ"
@@ -242,7 +245,22 @@ function run_tests()
}

export -f run_tests
timeout "$MAX_RUN_TIME" bash -c run_tests ||:

function timeout_with_logging() {
    local exit_code=0

    timeout -s TERM --preserve-status "${@}" || exit_code="${?}"

    if [[ "${exit_code}" -eq "124" ]]
    then
        echo "The command 'timeout ${*}' has been killed by timeout"
    fi

    return $exit_code
}

TIMEOUT=$((MAX_RUN_TIME - 700))
timeout_with_logging "$TIMEOUT" bash -c run_tests ||:

echo "Files in current directory"
ls -la ./
@@ -6,18 +6,12 @@ source /setup_export_logs.sh
# fail on errors, verbose and export all env variables
set -e -x -a

MAX_RUN_TIME=${MAX_RUN_TIME:-7200}
MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))
MAX_RUN_TIME=${MAX_RUN_TIME:-9000}
MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 9000 : MAX_RUN_TIME))

USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}

RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0

if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
    RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
fi

# Choose random timezone for this test run.
#
# NOTE: that clickhouse-test will randomize session_timezone by itself as well
@@ -101,53 +95,6 @@ if [ "$NUM_TRIES" -gt "1" ]; then
    mkdir -p /var/run/clickhouse-server
fi

# Run a CH instance to execute sequential tests on it in parallel with all other tests.
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
    mkdir -p /var/run/clickhouse-server3 /etc/clickhouse-server3 /var/lib/clickhouse3
    cp -r -L /etc/clickhouse-server/* /etc/clickhouse-server3/

    sudo chown clickhouse:clickhouse /var/run/clickhouse-server3 /var/lib/clickhouse3 /etc/clickhouse-server3/
    sudo chown -R clickhouse:clickhouse /etc/clickhouse-server3/*

    function replace(){
        sudo find /etc/clickhouse-server3/ -type f -name '*.xml' -exec sed -i "$1" {} \;
    }

    replace "s|<port>9000</port>|<port>19000</port>|g"
    replace "s|<port>9440</port>|<port>19440</port>|g"
    replace "s|<port>9988</port>|<port>19988</port>|g"
    replace "s|<port>9234</port>|<port>19234</port>|g"
    replace "s|<port>9181</port>|<port>19181</port>|g"
    replace "s|<https_port>8443</https_port>|<https_port>18443</https_port>|g"
    replace "s|<tcp_port>9000</tcp_port>|<tcp_port>19000</tcp_port>|g"
    replace "s|<tcp_port>9181</tcp_port>|<tcp_port>19181</tcp_port>|g"
    replace "s|<tcp_port_secure>9440</tcp_port_secure>|<tcp_port_secure>19440</tcp_port_secure>|g"
    replace "s|<tcp_with_proxy_port>9010</tcp_with_proxy_port>|<tcp_with_proxy_port>19010</tcp_with_proxy_port>|g"
    replace "s|<mysql_port>9004</mysql_port>|<mysql_port>19004</mysql_port>|g"
    replace "s|<postgresql_port>9005</postgresql_port>|<postgresql_port>19005</postgresql_port>|g"
    replace "s|<interserver_http_port>9009</interserver_http_port>|<interserver_http_port>19009</interserver_http_port>|g"
    replace "s|8123|18123|g"
    replace "s|/var/lib/clickhouse/|/var/lib/clickhouse3/|g"
    replace "s|/etc/clickhouse-server/|/etc/clickhouse-server3/|g"
    # distributed cache
    replace "s|<tcp_port>10001</tcp_port>|<tcp_port>10003</tcp_port>|g"
    replace "s|<tcp_port>10002</tcp_port>|<tcp_port>10004</tcp_port>|g"

    sudo -E -u clickhouse /usr/bin/clickhouse server --daemon --config /etc/clickhouse-server3/config.xml \
        --pid-file /var/run/clickhouse-server3/clickhouse-server.pid \
        -- --path /var/lib/clickhouse3/ --logger.stderr /var/log/clickhouse-server/stderr3.log \
        --logger.log /var/log/clickhouse-server/clickhouse-server3.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server3.err.log \
        --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
        --prometheus.port 19988 --keeper_server.raft_configuration.server.port 19234 --keeper_server.tcp_port 19181 \
        --mysql_port 19004 --postgresql_port 19005

    for _ in {1..100}
    do
        clickhouse-client --port 19000 --query "SELECT 1" && break
        sleep 1
    done
fi

# simplest way to forward env variables to server
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
@@ -183,9 +130,6 @@ if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        --keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
        --prometheus.port 29988 \
        --macros.shard s2 # It doesn't work :(

    MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
fi

if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
@@ -210,9 +154,6 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
        --keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
        --prometheus.port 19988 \
        --macros.replica r2 # It doesn't work :(

    MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
fi

# Wait for the server to start, but not for too long.
@@ -223,7 +164,6 @@ do
done

setup_logs_replication

attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01

function fn_exists() {
@@ -284,11 +224,7 @@ function run_tests()
    else
        # All other configurations are OK.
        ADDITIONAL_OPTIONS+=('--jobs')
        ADDITIONAL_OPTIONS+=('5')
    fi

    if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--run-sequential-tests-in-parallel')
        ADDITIONAL_OPTIONS+=('8')
    fi

    if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then
@@ -373,9 +309,6 @@ done
# Because it's the simplest way to read it when server has crashed.
sudo clickhouse stop ||:

if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
    sudo clickhouse stop --pid-path /var/run/clickhouse-server3 ||:
fi

if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
    sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
@@ -393,12 +326,6 @@ rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &

if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
    rg -Fa "<Fatal>" /var/log/clickhouse-server3/clickhouse-server.log ||:
    rg -A50 -Fa "============" /var/log/clickhouse-server3/stderr.log ||:
    zstd --threads=0 < /var/log/clickhouse-server3/clickhouse-server.log > /test_output/clickhouse-server3.log.zst &
fi

data_path_config="--path=/var/lib/clickhouse/"
if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
    # We need s3 storage configuration (but it's more likely that clickhouse-local will fail for some reason)
@@ -419,10 +346,6 @@ if [ $failed_to_save_logs -ne 0 ]; then
    do
        clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:

        if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
            clickhouse-local --path /var/lib/clickhouse3/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.3.tsv.zst ||:
        fi

        if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
            clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
            clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst ||:
@@ -464,12 +387,6 @@ rm -rf /var/lib/clickhouse/data/system/*/
tar -chf /test_output/store.tar /var/lib/clickhouse/store ||:
tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql ||:

if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
    rm -rf /var/lib/clickhouse3/data/system/*/
    tar -chf /test_output/store.tar /var/lib/clickhouse3/store ||:
    tar -chf /test_output/metadata.tar /var/lib/clickhouse3/metadata/*.sql ||:
fi

if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
    rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
@@ -124,7 +124,7 @@ which is equal to

#### Default values for from_env and from_zk attributes

It's possible to set the default value and substitute it only if the environment variable or zookeeper node is set using `replace="1"`.
It's possible to set the default value and substitute it only if the environment variable or zookeeper node is set using `replace="1"` (must be declared before from_env).

With previous example, but `MAX_QUERY_SIZE` is unset:

@@ -132,7 +132,7 @@ With previous example, but `MAX_QUERY_SIZE` is unset:
<clickhouse>
    <profiles>
        <default>
            <max_query_size from_env="MAX_QUERY_SIZE" replace="1">150000</max_query_size>
            <max_query_size replace="1" from_env="MAX_QUERY_SIZE">150000</max_query_size>
        </default>
    </profiles>
</clickhouse>
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/table-functions/azureBlobStorageCluster
sidebar_position: 55
sidebar_position: 15
sidebar_label: azureBlobStorageCluster
title: "azureBlobStorageCluster Table Function"
---
@@ -6,38 +6,38 @@ sidebar_label: Playground

# ClickHouse Playground {#clickhouse-playground}

[ClickHouse Playground](https://play.clickhouse.com/play?user=play) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster.
Several example datasets are available in Playground.

You can make queries to Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. More information about software products that support ClickHouse is available [here](../interfaces/index.md).

## Credentials {#credentials}

| Parameter           | Value                              |
|:--------------------|:-----------------------------------|
| HTTPS endpoint      | `https://play.clickhouse.com:443/` |
| Native TCP endpoint | `play.clickhouse.com:9440`         |
| User                | `explorer` or `play`               |
| Password            | (empty)                            |

## Limitations {#limitations}

The queries are executed as a read-only user. It implies some limitations:

- DDL queries are not allowed
- INSERT queries are not allowed

The service also has quotas on its usage.

## Examples {#examples}

HTTPS endpoint example with `curl`:

```bash
curl "https://play.clickhouse.com/?user=explorer" --data-binary "SELECT 'Play ClickHouse'"
```

TCP endpoint example with [CLI](../interfaces/cli.md):

``` bash
clickhouse client --secure --host play.clickhouse.com --user explorer
@@ -45,16 +45,17 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv)
    keeper_context->setDigestEnabled(true);
    keeper_context->setSnapshotDisk(std::make_shared<DiskLocal>("Keeper-snapshots", options["output-dir"].as<std::string>()));

    DB::KeeperStorage storage(/* tick_time_ms */ 500, /* superdigest */ "", keeper_context, /* initialize_system_nodes */ false);
    /// TODO(hanfei): support rocksdb here
    DB::KeeperMemoryStorage storage(/* tick_time_ms */ 500, /* superdigest */ "", keeper_context, /* initialize_system_nodes */ false);

    DB::deserializeKeeperStorageFromSnapshotsDir(storage, options["zookeeper-snapshots-dir"].as<std::string>(), logger);
    storage.initializeSystemNodes();

    DB::deserializeLogsAndApplyToStorage(storage, options["zookeeper-logs-dir"].as<std::string>(), logger);
    DB::SnapshotMetadataPtr snapshot_meta = std::make_shared<DB::SnapshotMetadata>(storage.getZXID(), 1, std::make_shared<nuraft::cluster_config>());
    DB::KeeperStorageSnapshot snapshot(&storage, snapshot_meta);
    DB::KeeperStorageSnapshot<DB::KeeperMemoryStorage> snapshot(&storage, snapshot_meta);

    DB::KeeperSnapshotManager manager(1, keeper_context);
    DB::KeeperSnapshotManager<DB::KeeperMemoryStorage> manager(1, keeper_context);
    auto snp = manager.serializeSnapshotToBuffer(snapshot);
    auto file_info = manager.serializeSnapshotBufferToDisk(*snp, storage.getZXID());
    std::cout << "Snapshot serialized to path:" << fs::path(file_info->disk->getPath()) / file_info->path << std::endl;
@@ -52,6 +52,10 @@
# include <Server/CertificateReloader.h>
#endif

#if USE_GWP_ASAN
# include <Common/GWPAsan.h>
#endif

#include <Server/ProtocolServerAdapter.h>
#include <Server/KeeperTCPHandlerFactory.h>

@@ -639,6 +643,10 @@ try
        tryLogCurrentException(log, "Disabling cgroup memory observer because of an error during initialization");
    }

#if USE_GWP_ASAN
    GWPAsan::initFinished();
#endif

    LOG_INFO(log, "Ready for connections.");

@@ -2213,6 +2213,7 @@ try
    CannotAllocateThreadFaultInjector::setFaultProbability(server_settings.cannot_allocate_thread_fault_injection_probability);

#if USE_GWP_ASAN
    GWPAsan::initFinished();
    GWPAsan::setForceSampleProbability(server_settings.gwp_asan_force_sample_probability);
#endif
@@ -4124,7 +4124,9 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo

    auto * column_to_interpolate = interpolate_node_typed.getExpression()->as<IdentifierNode>();
    if (!column_to_interpolate)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for indentifiers, but {} is found",
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "INTERPOLATE can work only for identifiers, but {} is found",
            interpolate_node_typed.getExpression()->formatASTForErrorMessage());
    auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName();
@@ -419,9 +419,6 @@ dbms_target_link_libraries (
    boost::circular_buffer
    boost::heap)

target_include_directories(clickhouse_common_io PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/Core/include") # uses some includes from core
dbms_target_include_directories(PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/Core/include")

target_link_libraries(clickhouse_common_io PUBLIC
    ch_contrib::miniselect
    ch_contrib::pdqsort)
@@ -81,6 +81,10 @@
#include <Common/config_version.h>
#include "config.h"

#if USE_GWP_ASAN
# include <Common/GWPAsan.h>
#endif

namespace fs = std::filesystem;
using namespace std::literals;
@@ -3264,6 +3268,11 @@ void ClientBase::init(int argc, char ** argv)
    fatal_log = createLogger("ClientBase", fatal_channel_ptr.get(), Poco::Message::PRIO_FATAL);
    signal_listener = std::make_unique<SignalListener>(nullptr, fatal_log);
    signal_listener_thread.start(*signal_listener);

#if USE_GWP_ASAN
    GWPAsan::initFinished();
#endif

}

}
@@ -366,13 +366,10 @@ void ColumnAggregateFunction::updateHashWithValue(size_t n, SipHash & hash) const
    hash.update(wbuf.str().c_str(), wbuf.str().size());
}

void ColumnAggregateFunction::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnAggregateFunction::getWeakHash32() const
{
    auto s = data.size();
    if (hash.getData().size() != data.size())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
                        "column size is {}, hash size is {}", std::to_string(s), hash.getData().size());

    WeakHash32 hash(s);
    auto & hash_data = hash.getData();

    std::vector<UInt8> v;
@@ -383,6 +380,8 @@ void ColumnAggregateFunction::updateWeakHash32(WeakHash32 & hash) const
        wbuf.finalize();
        hash_data[i] = ::updateWeakHash32(v.data(), v.size(), hash_data[i]);
    }

    return hash;
}

void ColumnAggregateFunction::updateHashFast(SipHash & hash) const
@@ -177,7 +177,7 @@ public:

    void updateHashWithValue(size_t n, SipHash & hash) const override;

    void updateWeakHash32(WeakHash32 & hash) const override;
    WeakHash32 getWeakHash32() const override;

    void updateHashFast(SipHash & hash) const override;
@@ -271,15 +271,12 @@ void ColumnArray::updateHashWithValue(size_t n, SipHash & hash) const
        getData().updateHashWithValue(offset + i, hash);
}

void ColumnArray::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnArray::getWeakHash32() const
{
    auto s = offsets->size();
    if (hash.getData().size() != s)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
                        "column size is {}, hash size is {}", s, hash.getData().size());
    WeakHash32 hash(s);

    WeakHash32 internal_hash(data->size());
    data->updateWeakHash32(internal_hash);
    WeakHash32 internal_hash = data->getWeakHash32();

    Offset prev_offset = 0;
    const auto & offsets_data = getOffsets();
@@ -300,6 +297,8 @@ void ColumnArray::updateWeakHash32(WeakHash32 & hash) const

        prev_offset = offsets_data[i];
    }

    return hash;
}

void ColumnArray::updateHashFast(SipHash & hash) const
@@ -82,7 +82,7 @@ public:
    const char * deserializeAndInsertFromArena(const char * pos) override;
    const char * skipSerializedInArena(const char * pos) const override;
    void updateHashWithValue(size_t n, SipHash & hash) const override;
    void updateWeakHash32(WeakHash32 & hash) const override;
    WeakHash32 getWeakHash32() const override;
    void updateHashFast(SipHash & hash) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
    void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
@@ -3,6 +3,7 @@
#include <optional>
#include <Core/Field.h>
#include <Columns/IColumn.h>
#include <Common/WeakHash.h>
#include <IO/BufferWithOwnMemory.h>

@@ -98,7 +99,7 @@ public:
    const char * deserializeAndInsertFromArena(const char *) override { throwMustBeDecompressed(); }
    const char * skipSerializedInArena(const char *) const override { throwMustBeDecompressed(); }
    void updateHashWithValue(size_t, SipHash &) const override { throwMustBeDecompressed(); }
    void updateWeakHash32(WeakHash32 &) const override { throwMustBeDecompressed(); }
    WeakHash32 getWeakHash32() const override { throwMustBeDecompressed(); }
    void updateHashFast(SipHash &) const override { throwMustBeDecompressed(); }
    ColumnPtr filter(const Filter &, ssize_t) const override { throwMustBeDecompressed(); }
    void expand(const Filter &, bool) override { throwMustBeDecompressed(); }
@@ -137,18 +137,10 @@ void ColumnConst::updatePermutation(PermutationSortDirection /*direction*/, Perm
{
}

void ColumnConst::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnConst::getWeakHash32() const
{
    if (hash.getData().size() != s)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
                        "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));

    WeakHash32 element_hash(1);
    data->updateWeakHash32(element_hash);
    size_t data_hash = element_hash.getData()[0];

    for (auto & value : hash.getData())
        value = static_cast<UInt32>(intHashCRC32(data_hash, value));
    WeakHash32 element_hash = data->getWeakHash32();
    return WeakHash32(s, element_hash.getData()[0]);
}

void ColumnConst::compareColumn(
@@ -204,7 +204,7 @@ public:
        data->updateHashWithValue(0, hash);
    }

    void updateWeakHash32(WeakHash32 & hash) const override;
    WeakHash32 getWeakHash32() const override;

    void updateHashFast(SipHash & hash) const override
    {
@@ -28,7 +28,6 @@ namespace ErrorCodes
    extern const int PARAMETER_OUT_OF_BOUND;
    extern const int SIZES_OF_COLUMNS_DOESNT_MATCH;
    extern const int NOT_IMPLEMENTED;
    extern const int LOGICAL_ERROR;
}

template <is_decimal T>
@@ -76,13 +75,10 @@ void ColumnDecimal<T>::updateHashWithValue(size_t n, SipHash & hash) const
}

template <is_decimal T>
void ColumnDecimal<T>::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnDecimal<T>::getWeakHash32() const
{
    auto s = data.size();

    if (hash.getData().size() != s)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
                        "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
    WeakHash32 hash(s);

    const T * begin = data.data();
    const T * end = begin + s;
@@ -94,6 +90,8 @@ void ColumnDecimal<T>::updateWeakHash32(WeakHash32 & hash) const
        ++begin;
        ++hash_data;
    }

    return hash;
}

template <is_decimal T>
@@ -102,7 +102,7 @@ public:
    const char * deserializeAndInsertFromArena(const char * pos) override;
    const char * skipSerializedInArena(const char * pos) const override;
    void updateHashWithValue(size_t n, SipHash & hash) const override;
    void updateWeakHash32(WeakHash32 & hash) const override;
    WeakHash32 getWeakHash32() const override;
    void updateHashFast(SipHash & hash) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
    int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
@@ -4,6 +4,7 @@
#include <Columns/ColumnVector.h>
#include <Columns/ColumnVariant.h>
#include <DataTypes/IDataType.h>
#include <Common/WeakHash.h>

namespace DB
@@ -174,9 +175,9 @@ public:

    void updateHashWithValue(size_t n, SipHash & hash) const override;

    void updateWeakHash32(WeakHash32 & hash) const override
    WeakHash32 getWeakHash32() const override
    {
        variant_column->updateWeakHash32(hash);
        return variant_column->getWeakHash32();
    }

    void updateHashFast(SipHash & hash) const override
@@ -137,14 +137,10 @@ void ColumnFixedString::updateHashWithValue(size_t index, SipHash & hash) const
    hash.update(reinterpret_cast<const char *>(&chars[n * index]), n);
}

void ColumnFixedString::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnFixedString::getWeakHash32() const
{
    auto s = size();

    if (hash.getData().size() != s)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
                        "column size is {}, "
                        "hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
    WeakHash32 hash(s);

    const UInt8 * pos = chars.data();
    UInt32 * hash_data = hash.getData().data();
@@ -156,6 +152,8 @@ void ColumnFixedString::updateWeakHash32(WeakHash32 & hash) const
        pos += n;
        ++hash_data;
    }

    return hash;
}

void ColumnFixedString::updateHashFast(SipHash & hash) const
@@ -133,7 +133,7 @@ public:

    void updateHashWithValue(size_t index, SipHash & hash) const override;

    void updateWeakHash32(WeakHash32 & hash) const override;
    WeakHash32 getWeakHash32() const override;

    void updateHashFast(SipHash & hash) const override;
@@ -4,6 +4,7 @@
#include <Core/NamesAndTypes.h>
#include <Core/ColumnsWithTypeAndName.h>
#include <Columns/IColumn.h>
#include <Common/WeakHash.h>

namespace DB
@@ -130,9 +131,9 @@ public:
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "updateHashWithValue is not implemented for {}", getName());
    }

    void updateWeakHash32(WeakHash32 &) const override
    WeakHash32 getWeakHash32() const override
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "updateWeakHash32 is not implemented for {}", getName());
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getWeakHash32 is not implemented for {}", getName());
    }

    void updateHashFast(SipHash &) const override
@@ -7,8 +7,7 @@
#include <Common/HashTable/HashMap.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include "Storages/IndicesDescription.h"
#include "base/types.h"
#include <base/types.h>
#include <base/sort.h>
#include <base/scope_guard.h>

@@ -320,19 +319,10 @@ const char * ColumnLowCardinality::skipSerializedInArena(const char * pos) const
    return getDictionary().skipSerializedInArena(pos);
}

void ColumnLowCardinality::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnLowCardinality::getWeakHash32() const
{
    auto s = size();

    if (hash.getData().size() != s)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
                        "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));

    const auto & dict = getDictionary().getNestedColumn();
    WeakHash32 dict_hash(dict->size());
    dict->updateWeakHash32(dict_hash);

    idx.updateWeakHash(hash, dict_hash);
    WeakHash32 dict_hash = getDictionary().getNestedColumn()->getWeakHash32();
    return idx.getWeakHash(dict_hash);
}

void ColumnLowCardinality::updateHashFast(SipHash & hash) const
@@ -832,10 +822,11 @@ bool ColumnLowCardinality::Index::containsDefault() const
    return contains;
}

void ColumnLowCardinality::Index::updateWeakHash(WeakHash32 & hash, WeakHash32 & dict_hash) const
WeakHash32 ColumnLowCardinality::Index::getWeakHash(const WeakHash32 & dict_hash) const
{
    WeakHash32 hash(positions->size());
    auto & hash_data = hash.getData();
    auto & dict_hash_data = dict_hash.getData();
    const auto & dict_hash_data = dict_hash.getData();

    auto update_weak_hash = [&](auto x)
    {
@@ -844,10 +835,11 @@ void ColumnLowCardinality::Index::updateWeakHash(WeakHash32 & hash, WeakHash32 &
        auto size = data.size();

        for (size_t i = 0; i < size; ++i)
            hash_data[i] = static_cast<UInt32>(intHashCRC32(dict_hash_data[data[i]], hash_data[i]));
            hash_data[i] = dict_hash_data[data[i]];
    };

    callForType(std::move(update_weak_hash), size_of_type);
    return hash;
}

void ColumnLowCardinality::Index::collectSerializedValueSizes(
@@ -111,7 +111,7 @@ public:
        getDictionary().updateHashWithValue(getIndexes().getUInt(n), hash);
    }

    void updateWeakHash32(WeakHash32 & hash) const override;
    WeakHash32 getWeakHash32() const override;

    void updateHashFast(SipHash &) const override;

@@ -325,7 +325,7 @@ public:

    bool containsDefault() const;

    void updateWeakHash(WeakHash32 & hash, WeakHash32 & dict_hash) const;
    WeakHash32 getWeakHash(const WeakHash32 & dict_hash) const;

    void collectSerializedValueSizes(PaddedPODArray<UInt64> & sizes, const PaddedPODArray<UInt64> & dict_sizes) const;
@@ -143,9 +143,9 @@ void ColumnMap::updateHashWithValue(size_t n, SipHash & hash) const
    nested->updateHashWithValue(n, hash);
}

void ColumnMap::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnMap::getWeakHash32() const
{
    nested->updateWeakHash32(hash);
    return nested->getWeakHash32();
}

void ColumnMap::updateHashFast(SipHash & hash) const
@@ -64,7 +64,7 @@ public:
    const char * deserializeAndInsertFromArena(const char * pos) override;
    const char * skipSerializedInArena(const char * pos) const override;
    void updateHashWithValue(size_t n, SipHash & hash) const override;
    void updateWeakHash32(WeakHash32 & hash) const override;
    WeakHash32 getWeakHash32() const override;
    void updateHashFast(SipHash & hash) const override;

#if !defined(ABORT_ON_LOGICAL_ERROR)
@@ -56,25 +56,21 @@ void ColumnNullable::updateHashWithValue(size_t n, SipHash & hash) const
    getNestedColumn().updateHashWithValue(n, hash);
}

void ColumnNullable::updateWeakHash32(WeakHash32 & hash) const
WeakHash32 ColumnNullable::getWeakHash32() const
{
    auto s = size();

    if (hash.getData().size() != s)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
                        "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));

    WeakHash32 old_hash = hash;
    nested_column->updateWeakHash32(hash);
    WeakHash32 hash = nested_column->getWeakHash32();

    const auto & null_map_data = getNullMapData();
    auto & hash_data = hash.getData();
    auto & old_hash_data = old_hash.getData();

    /// Use old data for nulls.
    /// Use default for nulls.
    for (size_t row = 0; row < s; ++row)
        if (null_map_data[row])
            hash_data[row] = old_hash_data[row];
            hash_data[row] = WeakHash32::kDefaultInitialValue;

    return hash;
}

void ColumnNullable::updateHashFast(SipHash & hash) const
@@ -133,7 +133,7 @@ public:
    void protect() override;
    ColumnPtr replicate(const Offsets & replicate_offsets) const override;
    void updateHashWithValue(size_t n, SipHash & hash) const override;
    void updateWeakHash32(WeakHash32 & hash) const override;
    WeakHash32 getWeakHash32() const override;
    void updateHashFast(SipHash & hash) const override;
    void getExtremes(Field & min, Field & max) const override;
    // Special function for nullable minmax index
@@ -5,6 +5,7 @@
#include <Core/Names.h>
#include <DataTypes/Serializations/SubcolumnsTree.h>
#include <Common/PODArray.h>
#include <Common/WeakHash.h>

#include <DataTypes/IDataType.h>

@@ -252,7 +253,7 @@ public:
    const char * deserializeAndInsertFromArena(const char *) override { throwMustBeConcrete(); }
    const char * skipSerializedInArena(const char *) const override { throwMustBeConcrete(); }
    void updateHashWithValue(size_t, SipHash &) const override { throwMustBeConcrete(); }
    void updateWeakHash32(WeakHash32 &) const override { throwMustBeConcrete(); }
    WeakHash32 getWeakHash32() const override { throwMustBeConcrete(); }
    void updateHashFast(SipHash & hash) const override;
    void expand(const Filter &, bool) override { throwMustBeConcrete(); }
    bool hasEqualValues() const override { throwMustBeConcrete(); }
@@ -678,20 +678,22 @@ void ColumnSparse::updateHashWithValue(size_t n, SipHash & hash) const
     values->updateHashWithValue(getValueIndex(n), hash);
 }

-void ColumnSparse::updateWeakHash32(WeakHash32 & hash) const
+WeakHash32 ColumnSparse::getWeakHash32() const
 {
-    if (hash.getData().size() != _size)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
-                        "column size is {}, hash size is {}", _size, hash.getData().size());
+    WeakHash32 values_hash = values->getWeakHash32();
+    WeakHash32 hash(size());
+
+    auto & hash_data = hash.getData();
+    auto & values_hash_data = values_hash.getData();

     auto offset_it = begin();
-    auto & hash_data = hash.getData();
     for (size_t i = 0; i < _size; ++i, ++offset_it)
     {
         size_t value_index = offset_it.getValueIndex();
-        auto data_ref = values->getDataAt(value_index);
-        hash_data[i] = ::updateWeakHash32(reinterpret_cast<const UInt8 *>(data_ref.data), data_ref.size, hash_data[i]);
+        hash_data[i] = values_hash_data[value_index];
     }

+    return hash;
 }

 void ColumnSparse::updateHashFast(SipHash & hash) const
@@ -139,7 +139,7 @@ public:
     void protect() override;
     ColumnPtr replicate(const Offsets & replicate_offsets) const override;
     void updateHashWithValue(size_t n, SipHash & hash) const override;
-    void updateWeakHash32(WeakHash32 & hash) const override;
+    WeakHash32 getWeakHash32() const override;
     void updateHashFast(SipHash & hash) const override;
     void getExtremes(Field & min, Field & max) const override;
@@ -108,13 +108,10 @@ MutableColumnPtr ColumnString::cloneResized(size_t to_size) const
     return res;
 }

-void ColumnString::updateWeakHash32(WeakHash32 & hash) const
+WeakHash32 ColumnString::getWeakHash32() const
 {
     auto s = offsets.size();

-    if (hash.getData().size() != s)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
-                        "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
+    WeakHash32 hash(s);

     const UInt8 * pos = chars.data();
     UInt32 * hash_data = hash.getData().data();
@@ -130,6 +127,8 @@ void ColumnString::updateWeakHash32(WeakHash32 & hash) const
         prev_offset = offset;
         ++hash_data;
     }
+
+    return hash;
 }
@@ -212,7 +212,7 @@ public:
         hash.update(reinterpret_cast<const char *>(&chars[offset]), string_size);
     }

-    void updateWeakHash32(WeakHash32 & hash) const override;
+    WeakHash32 getWeakHash32() const override;

     void updateHashFast(SipHash & hash) const override
     {
@@ -201,6 +201,7 @@ bool ColumnTuple::tryInsert(const Field & x)
             return false;
         }
     }
+    ++column_length;

     return true;
 }
@@ -236,6 +237,7 @@ void ColumnTuple::doInsertManyFrom(const IColumn & src, size_t position, size_t

     for (size_t i = 0; i < tuple_size; ++i)
         columns[i]->insertManyFrom(*src_tuple.columns[i], position, length);
+    column_length += length;
 }

 void ColumnTuple::insertDefault()
@@ -308,16 +310,15 @@ void ColumnTuple::updateHashWithValue(size_t n, SipHash & hash) const
         column->updateHashWithValue(n, hash);
 }

-void ColumnTuple::updateWeakHash32(WeakHash32 & hash) const
+WeakHash32 ColumnTuple::getWeakHash32() const
 {
     auto s = size();

-    if (hash.getData().size() != s)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
-                        "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
+    WeakHash32 hash(s);

     for (const auto & column : columns)
-        column->updateWeakHash32(hash);
+        hash.update(column->getWeakHash32());

+    return hash;
 }

 void ColumnTuple::updateHashFast(SipHash & hash) const
@@ -81,7 +81,7 @@ public:
     const char * deserializeAndInsertFromArena(const char * pos) override;
     const char * skipSerializedInArena(const char * pos) const override;
     void updateHashWithValue(size_t n, SipHash & hash) const override;
-    void updateWeakHash32(WeakHash32 & hash) const override;
+    WeakHash32 getWeakHash32() const override;
     void updateHashFast(SipHash & hash) const override;
 #if !defined(ABORT_ON_LOGICAL_ERROR)
     void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
@@ -789,36 +789,26 @@ void ColumnVariant::updateHashWithValue(size_t n, SipHash & hash) const
     variants[localDiscriminatorByGlobal(global_discr)]->updateHashWithValue(offsetAt(n), hash);
 }

-void ColumnVariant::updateWeakHash32(WeakHash32 & hash) const
+WeakHash32 ColumnVariant::getWeakHash32() const
 {
     auto s = size();

-    if (hash.getData().size() != s)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
-                        "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
-
     /// If we have only NULLs, keep hash unchanged.
     if (hasOnlyNulls())
-        return;
+        return WeakHash32(s);

     /// Optimization for case when there is only 1 non-empty variant and no NULLs.
     /// In this case we can just calculate weak hash for this variant.
     if (auto non_empty_local_discr = getLocalDiscriminatorOfOneNoneEmptyVariantNoNulls())
-    {
-        variants[*non_empty_local_discr]->updateWeakHash32(hash);
-        return;
-    }
+        return variants[*non_empty_local_discr]->getWeakHash32();

     /// Calculate weak hash for all variants.
     std::vector<WeakHash32> nested_hashes;
     for (const auto & variant : variants)
-    {
-        WeakHash32 nested_hash(variant->size());
-        variant->updateWeakHash32(nested_hash);
-        nested_hashes.emplace_back(std::move(nested_hash));
-    }
+        nested_hashes.emplace_back(variant->getWeakHash32());

     /// For each row hash is a hash of corresponding row from corresponding variant.
+    WeakHash32 hash(s);
     auto & hash_data = hash.getData();
     const auto & local_discriminators_data = getLocalDiscriminators();
     const auto & offsets_data = getOffsets();
@@ -827,11 +817,10 @@ void ColumnVariant::updateWeakHash32(WeakHash32 & hash) const
         Discriminator discr = local_discriminators_data[i];
         /// Update hash only for non-NULL values
         if (discr != NULL_DISCRIMINATOR)
-        {
-            auto nested_hash = nested_hashes[local_discriminators_data[i]].getData()[offsets_data[i]];
-            hash_data[i] = static_cast<UInt32>(hashCRC32(nested_hash, hash_data[i]));
-        }
+            hash_data[i] = nested_hashes[discr].getData()[offsets_data[i]];
     }

+    return hash;
 }

 void ColumnVariant::updateHashFast(SipHash & hash) const
@@ -213,7 +213,7 @@ public:
     const char * deserializeVariantAndInsertFromArena(Discriminator global_discr, const char * pos);
     const char * skipSerializedInArena(const char * pos) const override;
     void updateHashWithValue(size_t n, SipHash & hash) const override;
-    void updateWeakHash32(WeakHash32 & hash) const override;
+    WeakHash32 getWeakHash32() const override;
     void updateHashFast(SipHash & hash) const override;
     ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;
     void expand(const Filter & mask, bool inverted) override;
@@ -73,13 +73,10 @@ void ColumnVector<T>::updateHashWithValue(size_t n, SipHash & hash) const
 }

 template <typename T>
-void ColumnVector<T>::updateWeakHash32(WeakHash32 & hash) const
+WeakHash32 ColumnVector<T>::getWeakHash32() const
 {
     auto s = data.size();

-    if (hash.getData().size() != s)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: "
-                        "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size()));
+    WeakHash32 hash(s);

     const T * begin = data.data();
     const T * end = begin + s;
@@ -91,6 +88,8 @@ void ColumnVector<T>::updateWeakHash32(WeakHash32 & hash) const
         ++begin;
         ++hash_data;
     }
+
+    return hash;
 }

 template <typename T>
@@ -114,7 +114,7 @@ public:

     void updateHashWithValue(size_t n, SipHash & hash) const override;

-    void updateWeakHash32(WeakHash32 & hash) const override;
+    WeakHash32 getWeakHash32() const override;

     void updateHashFast(SipHash & hash) const override;
@@ -300,10 +300,10 @@ public:
     /// passed bytes to hash must identify sequence of values unambiguously.
     virtual void updateHashWithValue(size_t n, SipHash & hash) const = 0;

-    /// Update hash function value. Hash is calculated for each element.
+    /// Get hash function value. Hash is calculated for each element.
     /// It's a fast weak hash function. Mainly need to scatter data between threads.
     /// WeakHash32 must have the same size as column.
-    virtual void updateWeakHash32(WeakHash32 & hash) const = 0;
+    virtual WeakHash32 getWeakHash32() const = 0;

     /// Update state of hash with all column.
     virtual void updateHashFast(SipHash & hash) const = 0;
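With the interface change above, every call site flips from filling a caller-owned hash in place to consuming a returned one. A minimal sketch of the migration (illustrative only; scatterRows and its surroundings are hypothetical, not part of this commit):

    #include <Columns/IColumn.h>
    #include <Common/WeakHash.h>

    using namespace DB;

    /// Hypothetical helper that scatters rows between threads by weak hash.
    void scatterRows(const IColumn & column, size_t num_threads)
    {
        /// Before: the caller had to pre-size the hash and pass it in:
        ///     WeakHash32 hash(column.size());
        ///     column.updateWeakHash32(hash);
        /// After: the column allocates and returns the hash itself, so a
        /// mis-sized buffer can no longer reach the implementation.
        WeakHash32 hash = column.getWeakHash32();

        for (UInt32 h : hash.getData())
        {
            /// ... route each row to bucket (h % num_threads) ...
            (void)h; (void)num_threads;
        }
    }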
@@ -1,6 +1,7 @@
 #pragma once

 #include <Columns/IColumn.h>
+#include <Common/WeakHash.h>


 namespace DB
@@ -63,8 +64,9 @@ public:
     {
     }

-    void updateWeakHash32(WeakHash32 & /*hash*/) const override
+    WeakHash32 getWeakHash32() const override
     {
+        return WeakHash32(s);
     }

     void updateHashFast(SipHash & /*hash*/) const override
@@ -1,6 +1,7 @@
 #pragma once
 #include <optional>
 #include <Columns/IColumn.h>
+#include <Common/WeakHash.h>

 namespace DB
 {
@@ -166,9 +167,9 @@ public:
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method scatter is not supported for ColumnUnique.");
     }

-    void updateWeakHash32(WeakHash32 &) const override
+    WeakHash32 getWeakHash32() const override
     {
-        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method updateWeakHash32 is not supported for ColumnUnique.");
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getWeakHash32 is not supported for ColumnUnique.");
     }

     void updateHashFast(SipHash &) const override
@@ -60,8 +60,7 @@ TEST(WeakHash32, ColumnVectorU8)
         data.push_back(i);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -77,8 +76,7 @@ TEST(WeakHash32, ColumnVectorI8)
         data.push_back(i);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -94,8 +92,7 @@ TEST(WeakHash32, ColumnVectorU16)
         data.push_back(i);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -111,8 +108,7 @@ TEST(WeakHash32, ColumnVectorI16)
         data.push_back(i);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -128,8 +124,7 @@ TEST(WeakHash32, ColumnVectorU32)
         data.push_back(i << 16u);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -145,8 +140,7 @@ TEST(WeakHash32, ColumnVectorI32)
         data.push_back(i << 16);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -162,8 +156,7 @@ TEST(WeakHash32, ColumnVectorU64)
         data.push_back(i << 32u);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -179,8 +172,7 @@ TEST(WeakHash32, ColumnVectorI64)
         data.push_back(i << 32);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -204,8 +196,7 @@ TEST(WeakHash32, ColumnVectorU128)
         }
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), eq_data);
 }
@@ -221,8 +212,7 @@ TEST(WeakHash32, ColumnVectorI128)
         data.push_back(i << 32);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -238,8 +228,7 @@ TEST(WeakHash32, ColumnDecimal32)
         data.push_back(i << 16);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -255,8 +244,7 @@ TEST(WeakHash32, ColumnDecimal64)
         data.push_back(i << 32);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -272,8 +260,7 @@ TEST(WeakHash32, ColumnDecimal128)
         data.push_back(i << 32);
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), col->getData());
 }
@@ -294,8 +281,7 @@ TEST(WeakHash32, ColumnString1)
         }
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), data);
 }
@@ -331,8 +317,7 @@ TEST(WeakHash32, ColumnString2)
         }
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), data);
 }
@@ -369,8 +354,7 @@ TEST(WeakHash32, ColumnString3)
         }
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), data);
 }
@@ -397,8 +381,7 @@ TEST(WeakHash32, ColumnFixedString)
         }
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), data);
 }
@@ -444,8 +427,7 @@ TEST(WeakHash32, ColumnArray)

     auto col_arr = ColumnArray::create(std::move(val), std::move(off));

-    WeakHash32 hash(col_arr->size());
-    col_arr->updateWeakHash32(hash);
+    WeakHash32 hash = col_arr->getWeakHash32();

     checkColumn(hash.getData(), eq_data);
 }
@@ -479,8 +461,7 @@ TEST(WeakHash32, ColumnArray2)

     auto col_arr = ColumnArray::create(std::move(val), std::move(off));

-    WeakHash32 hash(col_arr->size());
-    col_arr->updateWeakHash32(hash);
+    WeakHash32 hash = col_arr->getWeakHash32();

     checkColumn(hash.getData(), eq_data);
 }
@@ -536,8 +517,7 @@ TEST(WeakHash32, ColumnArrayArray)
     auto col_arr = ColumnArray::create(std::move(val), std::move(off));
     auto col_arr_arr = ColumnArray::create(std::move(col_arr), std::move(off2));

-    WeakHash32 hash(col_arr_arr->size());
-    col_arr_arr->updateWeakHash32(hash);
+    WeakHash32 hash = col_arr_arr->getWeakHash32();

     checkColumn(hash.getData(), eq_data);
 }
@@ -555,8 +535,7 @@ TEST(WeakHash32, ColumnConst)

     auto col_const = ColumnConst::create(std::move(inner_col), 256);

-    WeakHash32 hash(col_const->size());
-    col_const->updateWeakHash32(hash);
+    WeakHash32 hash = col_const->getWeakHash32();

     checkColumn(hash.getData(), data);
 }
@@ -576,8 +555,7 @@ TEST(WeakHash32, ColumnLowcardinality)
         }
     }

-    WeakHash32 hash(col->size());
-    col->updateWeakHash32(hash);
+    WeakHash32 hash = col->getWeakHash32();

     checkColumn(hash.getData(), data);
 }
@@ -602,8 +580,7 @@ TEST(WeakHash32, ColumnNullable)

     auto col_null = ColumnNullable::create(std::move(col), std::move(mask));

-    WeakHash32 hash(col_null->size());
-    col_null->updateWeakHash32(hash);
+    WeakHash32 hash = col_null->getWeakHash32();

     checkColumn(hash.getData(), eq);
 }
@@ -633,8 +610,7 @@ TEST(WeakHash32, ColumnTupleUInt64UInt64)
     columns.emplace_back(std::move(col2));
     auto col_tuple = ColumnTuple::create(std::move(columns));

-    WeakHash32 hash(col_tuple->size());
-    col_tuple->updateWeakHash32(hash);
+    WeakHash32 hash = col_tuple->getWeakHash32();

     checkColumn(hash.getData(), eq);
 }
@@ -671,8 +647,7 @@ TEST(WeakHash32, ColumnTupleUInt64String)
     columns.emplace_back(std::move(col2));
     auto col_tuple = ColumnTuple::create(std::move(columns));

-    WeakHash32 hash(col_tuple->size());
-    col_tuple->updateWeakHash32(hash);
+    WeakHash32 hash = col_tuple->getWeakHash32();

     checkColumn(hash.getData(), eq);
 }
@@ -709,8 +684,7 @@ TEST(WeakHash32, ColumnTupleUInt64FixedString)
     columns.emplace_back(std::move(col2));
     auto col_tuple = ColumnTuple::create(std::move(columns));

-    WeakHash32 hash(col_tuple->size());
-    col_tuple->updateWeakHash32(hash);
+    WeakHash32 hash = col_tuple->getWeakHash32();

     checkColumn(hash.getData(), eq);
 }
@@ -756,8 +730,7 @@ TEST(WeakHash32, ColumnTupleUInt64Array)
     columns.emplace_back(ColumnArray::create(std::move(val), std::move(off)));
     auto col_tuple = ColumnTuple::create(std::move(columns));

-    WeakHash32 hash(col_tuple->size());
-    col_tuple->updateWeakHash32(hash);
+    WeakHash32 hash = col_tuple->getWeakHash32();

     checkColumn(hash.getData(), eq_data);
 }
@@ -68,7 +68,7 @@ void * allocNoTrack(size_t size, size_t alignment)
 {
     void * buf;
 #if USE_GWP_ASAN
-    if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
+    if (unlikely(GWPAsan::shouldSample()))
     {
         if (void * ptr = GWPAsan::GuardedAlloc.allocate(size, alignment))
         {
@@ -185,7 +185,7 @@ void * Allocator<clear_memory_, populate>::realloc(void * buf, size_t old_size,
     }

 #if USE_GWP_ASAN
-    if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
+    if (unlikely(GWPAsan::shouldSample()))
     {
         auto trace_alloc = CurrentMemoryTracker::alloc(new_size);
         if (void * ptr = GWPAsan::GuardedAlloc.allocate(new_size, alignment))
@@ -206,7 +206,7 @@ void * Allocator<clear_memory_, populate>::realloc(void * buf, size_t old_size,
         }
         else
         {
-            [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(old_size);
+            [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(new_size);
             ProfileEvents::increment(ProfileEvents::GWPAsanAllocateFailed);
         }
     }
@@ -239,7 +239,7 @@ void * Allocator<clear_memory_, populate>::realloc(void * buf, size_t old_size,
     void * new_buf = ::realloc(buf, new_size);
     if (nullptr == new_buf)
     {
-        [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(old_size);
+        [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(new_size);
         throw DB::ErrnoException(
             DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY,
             "Allocator: Cannot realloc from {} to {}",
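The last two hunks fix an accounting mismatch: the failure paths rolled back the memory tracker with old_size even though the attempt had been charged with new_size. A reduced sketch of the pairing invariant the fix restores (hypothetical tracker, for illustration only):

    #include <atomic>
    #include <cstddef>

    /// Hypothetical stand-in for CurrentMemoryTracker: every alloc(n) must be
    /// undone by a free(n) with the same n, or the counter drifts permanently.
    static std::atomic<long long> tracked{0};
    static void trackAlloc(size_t n) { tracked += static_cast<long long>(n); }
    static void trackFree(size_t n) { tracked -= static_cast<long long>(n); }

    void * reallocSketch(void * old_buf, size_t old_size, size_t new_size)
    {
        trackAlloc(new_size);          /// the attempt is charged with new_size up front
        void * new_buf = nullptr;      /// pretend the allocation failed
        if (new_buf == nullptr)
        {
            /// Rolling back with old_size (the old bug) would leave
            /// (new_size - old_size) bytes counted forever; the fix
            /// releases exactly what was charged.
            trackFree(new_size);
            return old_buf;
        }
        trackFree(old_size);           /// success path: the old allocation is gone
        return new_buf;
    }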
@@ -244,6 +244,15 @@ private:
     const char * className() const noexcept override { return "DB::ErrnoException"; }
 };

+/// An exception to use in unit tests to test interfaces.
+/// It is distinguished from others, so it does not have to be logged.
+class TestException : public Exception
+{
+public:
+    using Exception::Exception;
+};
+
+
 using Exceptions = std::vector<std::exception_ptr>;

 /** Try to write an exception to the log (and forget about it).
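A sketch of the intended use (hypothetical test, not from this commit): code under test throws TestException to exercise error paths, and handlers can recognize the induced failure without logging it.

    #include <gtest/gtest.h>
    #include <Common/Exception.h>

    namespace DB::ErrorCodes { extern const int LOGICAL_ERROR; }

    TEST(ErrorHandling, InducedFailureIsNotLogged)
    {
        auto failing_step = []
        {
            /// Behaves like any DB::Exception, but its type tells handlers
            /// this failure was induced by the test and need not be logged.
            throw DB::TestException(DB::ErrorCodes::LOGICAL_ERROR, "induced failure");
        };
        EXPECT_THROW(failing_step(), DB::TestException);
    }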
@@ -217,6 +217,13 @@ void printReport([[maybe_unused]] uintptr_t fault_address)
         reinterpret_cast<void **>(trace.data()), 0, trace_length, [&](const auto line) { LOG_FATAL(logger, fmt::runtime(line)); });
 }

+std::atomic<bool> init_finished = false;
+
+void initFinished()
+{
+    init_finished.store(true, std::memory_order_relaxed);
+}
+
 std::atomic<double> force_sample_probability = 0.0;

 void setForceSampleProbability(double value)
@@ -19,12 +19,30 @@ bool isGWPAsanError(uintptr_t fault_address);

 void printReport(uintptr_t fault_address);

+extern std::atomic<bool> init_finished;
+
+void initFinished();
+
 extern std::atomic<double> force_sample_probability;

 void setForceSampleProbability(double value);

+/**
+ * We'd like to postpone sampling allocations until the startup is finished. There are mainly
+ * two reasons for that:
+ *
+ * - To avoid complex issues with initialization order
+ * - Don't waste MaxSimultaneousAllocations on global objects as it's not useful
+ */
+inline bool shouldSample()
+{
+    return init_finished.load(std::memory_order_relaxed) && GuardedAlloc.shouldSample();
+}
+
+inline bool shouldForceSample()
+{
+    if (!init_finished.load(std::memory_order_relaxed))
+        return false;
+    std::bernoulli_distribution dist(force_sample_probability.load(std::memory_order_relaxed));
+    return dist(thread_local_rng);
+}
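A sketch of how the new hooks are meant to be wired (the call site shown is an assumption; the diff itself only adds the hooks):

    #include <Common/GWPAsan.h>

    int serverMainSketch()
    {
        /// ... construct global objects, load config, warm up pools ...

        /// Flip the gate once startup is done; from this point on
        /// GWPAsan::shouldSample() may start returning true and the
        /// guarded allocator begins sampling real workload allocations.
        GWPAsan::initFinished();

        /// Optionally force-sample a fraction of allocations, e.g. in tests.
        GWPAsan::setForceSampleProbability(0.0);

        /// ... run the server ...
        return 0;
    }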
@@ -442,8 +442,6 @@ The server successfully detected this situation and will download merged part fr
     M(ReadBufferFromS3InitMicroseconds, "Time spent initializing connection to S3.") \
     M(ReadBufferFromS3Bytes, "Bytes read from S3.") \
     M(ReadBufferFromS3RequestsErrors, "Number of exceptions while reading from S3.") \
-    M(ReadBufferFromS3ResetSessions, "Number of HTTP sessions that were reset in ReadBufferFromS3.") \
-    M(ReadBufferFromS3PreservedSessions, "Number of HTTP sessions that were preserved in ReadBufferFromS3.") \
     \
     M(WriteBufferFromS3Microseconds, "Time spent on writing to S3.") \
     M(WriteBufferFromS3Bytes, "Bytes written to S3.") \
@@ -23,8 +23,20 @@ namespace DB

 LazyPipeFDs TraceSender::pipe;

+static thread_local bool inside_send = false;
 void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Extras extras)
 {
+    /** The method shouldn't be called recursively or throw exceptions.
+      * There are several reasons:
+      * - avoid infinite recursion when some of subsequent functions invoke tracing;
+      * - avoid inconsistent writes if the method was interrupted by a signal handler in the middle of writing,
+      *   and then another tracing is invoked (e.g., from query profiler).
+      */
+    if (unlikely(inside_send))
+        return;
+    inside_send = true;
+    DENY_ALLOCATIONS_IN_SCOPE;
+
     constexpr size_t buf_size = sizeof(char) /// TraceCollector stop flag
         + sizeof(UInt8) /// String size
         + QUERY_ID_MAX_LEN /// Maximum query_id length
@@ -80,6 +92,8 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Ext
     writePODBinary(extras.increment, out);

     out.next();
+
+    inside_send = false;
 }

 }
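The flag is set and cleared manually because the function promises not to throw; in code that can unwind, the same reentrancy protection is usually written as an RAII latch so the reset cannot be skipped. A generic sketch (not from this commit):

    /// Generic reentrancy latch: the destructor restores the flag even if
    /// the protected scope exits via an exception.
    struct ReentrancyGuard
    {
        bool & flag;
        bool entered;

        explicit ReentrancyGuard(bool & flag_) : flag(flag_), entered(!flag_)
        {
            if (entered)
                flag = true;
        }
        ~ReentrancyGuard()
        {
            if (entered)
                flag = false;
        }
    };

    static thread_local bool inside_send_sketch = false;

    void sendSketch()
    {
        ReentrancyGuard guard(inside_send_sketch);
        if (!guard.entered)
            return;     /// recursive call: bail out, as in the code above
        /// ... do the actual (possibly throwing) work ...
    }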
@@ -1,2 +1,24 @@
 #include <Common/WeakHash.h>
+#include <Common/Exception.h>
+#include <Common/HashTable/Hash.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+void WeakHash32::update(const WeakHash32 & other)
+{
+    size_t size = data.size();
+    if (size != other.data.size())
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match: "
+                        "left size is {}, right size is {}", size, other.data.size());
+
+    for (size_t i = 0; i < size; ++i)
+        data[i] = static_cast<UInt32>(intHashCRC32(other.data[i], data[i]));
+}
+
+}
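ColumnTuple above is the first consumer of this combinator: it folds the per-row hashes of each nested column into one. A reduced sketch of the same pattern (illustrative; key_column and value_column are placeholders, and both columns must have the same number of rows or update() throws):

    #include <Columns/IColumn.h>
    #include <Common/WeakHash.h>

    using namespace DB;

    /// Combine the weak hashes of two parallel columns row by row,
    /// exactly as ColumnTuple::getWeakHash32 does for its elements.
    WeakHash32 combinedRowHash(const IColumn & key_column, const IColumn & value_column)
    {
        WeakHash32 hash(key_column.size());         /// every slot starts at kDefaultInitialValue
        hash.update(key_column.getWeakHash32());    /// mix in per-row key hashes (CRC32 combine)
        hash.update(value_column.getWeakHash32());  /// then per-row value hashes
        return hash;
    }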
@@ -11,9 +11,8 @@ namespace DB
 /// The main purpose why this class needed is to support data initialization. Initially, every bit is 1.
 class WeakHash32
 {
-    static constexpr UInt32 kDefaultInitialValue = ~UInt32(0);
-
 public:
+    static constexpr UInt32 kDefaultInitialValue = ~UInt32(0);

     using Container = PaddedPODArray<UInt32>;

@@ -22,6 +21,8 @@ public:

     void reset(size_t size, UInt32 initial_value = kDefaultInitialValue) { data.assign(size, initial_value); }

+    void update(const WeakHash32 & other);
+
     const Container & getData() const { return data; }
     Container & getData() { return data; }
@@ -13,14 +13,14 @@
 #include <Common/ZooKeeper/Types.h>
 #include <Common/ZooKeeper/ZooKeeperCommon.h>
 #include <Common/randomSeed.h>
 #include <base/find_symbols.h>
 #include <base/sort.h>
+#include <base/map.h>
 #include <base/getFQDNOrHostName.h>
 #include <Core/ServerUUID.h>
 #include <Core/BackgroundSchedulePool.h>
-#include "Common/ZooKeeper/IKeeper.h"
 #include <Common/DNSResolver.h>
 #include <Common/ZooKeeper/IKeeper.h>
 #include <Common/StringUtils.h>
+#include <Common/quoteString.h>
 #include <Common/Exception.h>
 #include <Interpreters/Context.h>

@@ -114,7 +114,11 @@ void ZooKeeper::init(ZooKeeperArgs args_, std::unique_ptr<Coordination::IKeeper>
     /// availability_zones is empty on server startup or after config reloading
     /// We will keep the az info when starting new sessions
     availability_zones = args.availability_zones;
-    LOG_TEST(log, "Availability zones from config: [{}], client: {}", fmt::join(availability_zones, ", "), args.client_availability_zone);
+
+    LOG_TEST(log, "Availability zones from config: [{}], client: {}",
+        fmt::join(collections::map(availability_zones, [](auto s){ return DB::quoteString(s); }), ", "),
+        DB::quoteString(args.client_availability_zone));
+
     if (args.availability_zone_autodetect)
         updateAvailabilityZones();
 }
@@ -37,7 +37,7 @@ requires DB::OptionalArgument<TAlign...>
 inline ALWAYS_INLINE void * newImpl(std::size_t size, TAlign... align)
 {
 #if USE_GWP_ASAN
-    if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
+    if (unlikely(GWPAsan::shouldSample()))
     {
         if constexpr (sizeof...(TAlign) == 1)
         {
@@ -83,7 +83,7 @@ inline ALWAYS_INLINE void * newImpl(std::size_t size, TAlign... align)
 inline ALWAYS_INLINE void * newNoExcept(std::size_t size) noexcept
 {
 #if USE_GWP_ASAN
-    if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
+    if (unlikely(GWPAsan::shouldSample()))
     {
         if (void * ptr = GWPAsan::GuardedAlloc.allocate(size))
         {
@@ -102,7 +102,7 @@ inline ALWAYS_INLINE void * newNoExcept(std::size_t size) noexcept
 inline ALWAYS_INLINE void * newNoExcept(std::size_t size, std::align_val_t align) noexcept
 {
 #if USE_GWP_ASAN
-    if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
+    if (unlikely(GWPAsan::shouldSample()))
     {
         if (void * ptr = GWPAsan::GuardedAlloc.allocate(size, alignToSizeT(align)))
         {
|
@ -54,16 +54,3 @@ TEST(ShellCommand, ExecuteWithInput)
|
||||
|
||||
EXPECT_EQ(res, "Hello, world!\n");
|
||||
}
|
||||
|
||||
TEST(ShellCommand, AutoWait)
|
||||
{
|
||||
// <defunct> hunting:
|
||||
for (int i = 0; i < 1000; ++i)
|
||||
{
|
||||
auto command = ShellCommand::execute("echo " + std::to_string(i));
|
||||
//command->wait(); // now automatic
|
||||
}
|
||||
|
||||
// std::cerr << "inspect me: ps auxwwf\n";
|
||||
// std::this_thread::sleep_for(std::chrono::seconds(100));
|
||||
}
|
||||
|
@@ -55,6 +55,7 @@ struct Settings;
     M(UInt64, min_request_size_for_cache, 50 * 1024, "Minimal size of the request to cache the deserialization result. Caching can have negative effect on latency for smaller requests, set to 0 to disable", 0) \
     M(UInt64, raft_limits_reconnect_limit, 50, "If connection to a peer is silent longer than this limit * (multiplied by heartbeat interval), we re-establish the connection.", 0) \
     M(Bool, async_replication, false, "Enable async replication. All write and read guarantees are preserved while better performance is achieved. Settings is disabled by default to not break backwards compatibility.", 0) \
+    M(Bool, experimental_use_rocksdb, false, "Use rocksdb as backend storage", 0) \
    M(UInt64, latest_logs_cache_size_threshold, 1 * 1024 * 1024 * 1024, "Maximum total size of in-memory cache of latest log entries.", 0) \
    M(UInt64, commit_logs_cache_size_threshold, 500 * 1024 * 1024, "Maximum total size of in-memory cache of log entries needed next for commit.", 0) \
    M(UInt64, disk_move_retries_wait_ms, 1000, "How long to wait between retries after a failure which happened while a file was being moved between disks.", 0) \
@@ -183,8 +183,6 @@
     M(ReadBufferFromS3InitMicroseconds) \
     M(ReadBufferFromS3Bytes) \
     M(ReadBufferFromS3RequestsErrors) \
-    M(ReadBufferFromS3ResetSessions) \
-    M(ReadBufferFromS3PreservedSessions) \
     \
     M(WriteBufferFromS3Microseconds) \
     M(WriteBufferFromS3Bytes) \
@@ -5,18 +5,27 @@

 #include <Coordination/CoordinationSettings.h>
 #include <Coordination/Defines.h>
-#include <Disks/DiskLocal.h>
-#include <Interpreters/Context.h>
-#include <IO/S3/Credentials.h>
-#include <IO/WriteHelpers.h>
-#include <Poco/Util/AbstractConfiguration.h>
+#include <Poco/Util/JSONConfiguration.h>
 #include <Coordination/KeeperConstants.h>
 #include <Server/CloudPlacementInfo.h>
 #include <Coordination/KeeperFeatureFlags.h>
+#include <Disks/DiskLocal.h>
+#include <Disks/DiskSelector.h>
+#include <IO/S3/Credentials.h>
+#include <Interpreters/Context.h>
+#include <Poco/Util/AbstractConfiguration.h>
 #include <Common/logger_useful.h>

+#include <boost/algorithm/string.hpp>
+
+#include "config.h"
+#if USE_ROCKSDB
+#include <rocksdb/table.h>
+#include <rocksdb/convenience.h>
+#include <rocksdb/utilities/db_ttl.h>
+#endif

 namespace DB
 {

@@ -24,6 +33,8 @@ namespace ErrorCodes
 {

     extern const int BAD_ARGUMENTS;
+    extern const int LOGICAL_ERROR;
+    extern const int ROCKSDB_ERROR;

 }
@@ -41,6 +52,95 @@ KeeperContext::KeeperContext(bool standalone_keeper_, CoordinationSettingsPtr co
     system_nodes_with_data[keeper_api_version_path] = toString(static_cast<uint8_t>(KeeperApiVersion::WITH_MULTI_READ));
 }

+#if USE_ROCKSDB
+using RocksDBOptions = std::unordered_map<std::string, std::string>;
+
+static RocksDBOptions getOptionsFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & path)
+{
+    RocksDBOptions options;
+
+    Poco::Util::AbstractConfiguration::Keys keys;
+    config.keys(path, keys);
+
+    for (const auto & key : keys)
+    {
+        const String key_path = path + "." + key;
+        options[key] = config.getString(key_path);
+    }
+
+    return options;
+}
+
+static rocksdb::Options getRocksDBOptionsFromConfig(const Poco::Util::AbstractConfiguration & config)
+{
+    rocksdb::Status status;
+    rocksdb::Options base;
+
+    base.create_if_missing = true;
+    base.compression = rocksdb::CompressionType::kZSTD;
+    base.statistics = rocksdb::CreateDBStatistics();
+    /// It is too verbose by default, and in fact we don't care about rocksdb logs at all.
+    base.info_log_level = rocksdb::ERROR_LEVEL;
+
+    rocksdb::Options merged = base;
+    rocksdb::BlockBasedTableOptions table_options;
+
+    if (config.has("keeper_server.rocksdb.options"))
+    {
+        auto config_options = getOptionsFromConfig(config, "keeper_server.rocksdb.options");
+        status = rocksdb::GetDBOptionsFromMap(merged, config_options, &merged);
+        if (!status.ok())
+        {
+            throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from 'rocksdb.options' : {}",
+                status.ToString());
+        }
+    }
+    if (config.has("rocksdb.column_family_options"))
+    {
+        auto column_family_options = getOptionsFromConfig(config, "rocksdb.column_family_options");
+        status = rocksdb::GetColumnFamilyOptionsFromMap(merged, column_family_options, &merged);
+        if (!status.ok())
+        {
+            throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from 'rocksdb.column_family_options' at: {}", status.ToString());
+        }
+    }
+    if (config.has("rocksdb.block_based_table_options"))
+    {
+        auto block_based_table_options = getOptionsFromConfig(config, "rocksdb.block_based_table_options");
+        status = rocksdb::GetBlockBasedTableOptionsFromMap(table_options, block_based_table_options, &table_options);
+        if (!status.ok())
+        {
+            throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from 'rocksdb.block_based_table_options' at: {}", status.ToString());
+        }
+    }
+
+    merged.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_options));
+    return merged;
+}
+#endif
+
+KeeperContext::Storage KeeperContext::getRocksDBPathFromConfig(const Poco::Util::AbstractConfiguration & config) const
+{
+    const auto create_local_disk = [](const auto & path)
+    {
+        if (fs::exists(path))
+            fs::remove_all(path);
+        fs::create_directories(path);
+
+        return std::make_shared<DiskLocal>("LocalRocksDBDisk", path);
+    };
+    if (config.has("keeper_server.rocksdb_path"))
+        return create_local_disk(config.getString("keeper_server.rocksdb_path"));
+
+    if (config.has("keeper_server.storage_path"))
+        return create_local_disk(std::filesystem::path{config.getString("keeper_server.storage_path")} / "rocksdb");
+
+    if (standalone_keeper)
+        return create_local_disk(std::filesystem::path{config.getString("path", KEEPER_DEFAULT_PATH)} / "rocksdb");
+    else
+        return create_local_disk(std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination/rocksdb");
+}
+
 void KeeperContext::initialize(const Poco::Util::AbstractConfiguration & config, KeeperDispatcher * dispatcher_)
 {
     dispatcher = dispatcher_;
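For orientation, a hypothetical keeper config fragment that would flow through this merging path (key names mirror the lookups above; the XML shape and values are assumptions, not recommendations):

    /// Hypothetical config fragment, shown as a C++ raw string for illustration.
    /// Each child of <options> becomes one entry in the RocksDBOptions map and
    /// is handed to rocksdb::GetDBOptionsFromMap on top of the hard-coded defaults.
    static const char * example_keeper_rocksdb_config = R"(
    <clickhouse>
        <keeper_server>
            <coordination_settings>
                <experimental_use_rocksdb>1</experimental_use_rocksdb>
            </coordination_settings>
            <rocksdb>
                <options>
                    <max_background_jobs>4</max_background_jobs>
                </options>
            </rocksdb>
        </keeper_server>
    </clickhouse>
    )";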
@@ -59,6 +159,14 @@ void KeeperContext::initialize(const Poco::Util::AbstractConfiguration & config,

     initializeFeatureFlags(config);
     initializeDisks(config);
+
+#if USE_ROCKSDB
+    if (config.getBool("keeper_server.coordination_settings.experimental_use_rocksdb", false))
+    {
+        rocksdb_options = std::make_shared<rocksdb::Options>(getRocksDBOptionsFromConfig(config));
+        digest_enabled = false; /// TODO: support digest
+    }
+#endif
 }

 namespace
@@ -94,6 +202,8 @@ void KeeperContext::initializeDisks(const Poco::Util::AbstractConfiguration & co
 {
     disk_selector->initialize(config, "storage_configuration.disks", Context::getGlobalContextInstance(), diskValidator);

+    rocksdb_storage = getRocksDBPathFromConfig(config);
+
     log_storage = getLogsPathFromConfig(config);

     if (config.has("keeper_server.latest_log_storage_disk"))
@@ -262,6 +372,37 @@ void KeeperContext::dumpConfiguration(WriteBufferFromOwnString & buf) const
     }
 }

+
+void KeeperContext::setRocksDBDisk(DiskPtr disk)
+{
+    rocksdb_storage = std::move(disk);
+}
+
+DiskPtr KeeperContext::getTemporaryRocksDBDisk() const
+{
+    DiskPtr rocksdb_disk = getDisk(rocksdb_storage);
+    if (!rocksdb_disk)
+    {
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "rocksdb storage is not initialized");
+    }
+    auto uuid_str = formatUUID(UUIDHelpers::generateV4());
+    String path_to_create = "rocks_" + std::string(uuid_str.data(), uuid_str.size());
+    rocksdb_disk->createDirectory(path_to_create);
+    return std::make_shared<DiskLocal>("LocalTmpRocksDBDisk", fullPath(rocksdb_disk, path_to_create));
+}
+
+void KeeperContext::setRocksDBOptions(std::shared_ptr<rocksdb::Options> rocksdb_options_)
+{
+    if (rocksdb_options_ != nullptr)
+        rocksdb_options = rocksdb_options_;
+    else
+    {
+#if USE_ROCKSDB
+        rocksdb_options = std::make_shared<rocksdb::Options>(getRocksDBOptionsFromConfig(Poco::Util::JSONConfiguration()));
+#endif
+    }
+}
+
 KeeperContext::Storage KeeperContext::getLogsPathFromConfig(const Poco::Util::AbstractConfiguration & config) const
 {
     const auto create_local_disk = [](const auto & path)
@@ -6,6 +6,11 @@
 #include <cstdint>
 #include <memory>

+namespace rocksdb
+{
+struct Options;
+}
+
 namespace DB
 {

@@ -62,6 +67,12 @@ public:

     constexpr KeeperDispatcher * getDispatcher() const { return dispatcher; }

+    void setRocksDBDisk(DiskPtr disk);
+    DiskPtr getTemporaryRocksDBDisk() const;
+
+    void setRocksDBOptions(std::shared_ptr<rocksdb::Options> rocksdb_options_ = nullptr);
+    std::shared_ptr<rocksdb::Options> getRocksDBOptions() const { return rocksdb_options; }
+
     UInt64 getKeeperMemorySoftLimit() const { return memory_soft_limit; }
     void updateKeeperMemorySoftLimit(const Poco::Util::AbstractConfiguration & config);

@@ -90,6 +101,7 @@ private:
     void initializeFeatureFlags(const Poco::Util::AbstractConfiguration & config);
     void initializeDisks(const Poco::Util::AbstractConfiguration & config);

+    Storage getRocksDBPathFromConfig(const Poco::Util::AbstractConfiguration & config) const;
     Storage getLogsPathFromConfig(const Poco::Util::AbstractConfiguration & config) const;
     Storage getSnapshotsPathFromConfig(const Poco::Util::AbstractConfiguration & config) const;
     Storage getStatePathFromConfig(const Poco::Util::AbstractConfiguration & config) const;
@@ -111,12 +123,15 @@ private:

     std::shared_ptr<DiskSelector> disk_selector;

+    Storage rocksdb_storage;
     Storage log_storage;
     Storage latest_log_storage;
     Storage snapshot_storage;
     Storage latest_snapshot_storage;
     Storage state_file_storage;

+    std::shared_ptr<rocksdb::Options> rocksdb_options;
+
     std::vector<std::string> old_log_disk_names;
     std::vector<std::string> old_snapshot_disk_names;
@@ -117,13 +117,13 @@ void KeeperDispatcher::requestThread()
     RaftAppendResult prev_result = nullptr;
     /// Requests from previous iteration. We store them to be able
     /// to send errors to the client.
-    KeeperStorage::RequestsForSessions prev_batch;
+    KeeperStorageBase::RequestsForSessions prev_batch;

     const auto & shutdown_called = keeper_context->isShutdownCalled();

     while (!shutdown_called)
     {
-        KeeperStorage::RequestForSession request;
+        KeeperStorageBase::RequestForSession request;

         auto coordination_settings = configuration_and_settings->coordination_settings;
         uint64_t max_wait = coordination_settings->operation_timeout_ms.totalMilliseconds();
@@ -153,7 +153,7 @@ void KeeperDispatcher::requestThread()
             continue;
         }

-        KeeperStorage::RequestsForSessions current_batch;
+        KeeperStorageBase::RequestsForSessions current_batch;
         size_t current_batch_bytes_size = 0;

         bool has_read_request = false;
@@ -311,7 +311,7 @@ void KeeperDispatcher::responseThread()
     const auto & shutdown_called = keeper_context->isShutdownCalled();
     while (!shutdown_called)
     {
-        KeeperStorage::ResponseForSession response_for_session;
+        KeeperStorageBase::ResponseForSession response_for_session;

         uint64_t max_wait = configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds();

@@ -402,7 +402,7 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
         return false;
     }

-    KeeperStorage::RequestForSession request_info;
+    KeeperStorageBase::RequestForSession request_info;
     request_info.request = request;
     using namespace std::chrono;
     request_info.time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
@@ -448,7 +448,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
         snapshots_queue,
         keeper_context,
         snapshot_s3,
-        [this](uint64_t /*log_idx*/, const KeeperStorage::RequestForSession & request_for_session)
+        [this](uint64_t /*log_idx*/, const KeeperStorageBase::RequestForSession & request_for_session)
         {
             {
                 /// check if we have queue of read requests depending on this request to be committed
@@ -540,7 +540,7 @@ void KeeperDispatcher::shutdown()
             update_configuration_thread.join();
         }

-        KeeperStorage::RequestForSession request_for_session;
+        KeeperStorageBase::RequestForSession request_for_session;

         /// Set session expired for all pending requests
         while (requests_queue && requests_queue->tryPop(request_for_session))
@@ -551,7 +551,7 @@ void KeeperDispatcher::shutdown()
             setResponse(request_for_session.session_id, response);
         }

-        KeeperStorage::RequestsForSessions close_requests;
+        KeeperStorageBase::RequestsForSessions close_requests;
        {
            /// Clear all registered sessions
            std::lock_guard lock(session_to_response_callback_mutex);
@@ -565,7 +565,7 @@ void KeeperDispatcher::shutdown()
                auto request = Coordination::ZooKeeperRequestFactory::instance().get(Coordination::OpNum::Close);
                request->xid = Coordination::CLOSE_XID;
                using namespace std::chrono;
-                KeeperStorage::RequestForSession request_info
+                KeeperStorageBase::RequestForSession request_info
                {
                    .session_id = session,
                    .time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count(),
@@ -663,7 +663,7 @@ void KeeperDispatcher::sessionCleanerTask()
                auto request = Coordination::ZooKeeperRequestFactory::instance().get(Coordination::OpNum::Close);
                request->xid = Coordination::CLOSE_XID;
                using namespace std::chrono;
-                KeeperStorage::RequestForSession request_info
+                KeeperStorageBase::RequestForSession request_info
                {
                    .session_id = dead_session,
                    .time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count(),
@@ -711,16 +711,16 @@ void KeeperDispatcher::finishSession(int64_t session_id)
    }
 }

-void KeeperDispatcher::addErrorResponses(const KeeperStorage::RequestsForSessions & requests_for_sessions, Coordination::Error error)
+void KeeperDispatcher::addErrorResponses(const KeeperStorageBase::RequestsForSessions & requests_for_sessions, Coordination::Error error)
 {
     for (const auto & request_for_session : requests_for_sessions)
     {
-        KeeperStorage::ResponsesForSessions responses;
+        KeeperStorageBase::ResponsesForSessions responses;
         auto response = request_for_session.request->makeResponse();
         response->xid = request_for_session.request->xid;
         response->zxid = 0;
         response->error = error;
-        if (!responses_queue.push(DB::KeeperStorage::ResponseForSession{request_for_session.session_id, response}))
+        if (!responses_queue.push(DB::KeeperStorageBase::ResponseForSession{request_for_session.session_id, response}))
            throw Exception(ErrorCodes::SYSTEM_ERROR,
                "Could not push error response xid {} zxid {} error message {} to responses queue",
                response->xid,
@@ -730,7 +730,7 @@ void KeeperDispatcher::addErrorResponses(const KeeperStorage::RequestsForSession
 }

 nuraft::ptr<nuraft::buffer> KeeperDispatcher::forceWaitAndProcessResult(
-    RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions, bool clear_requests_on_success)
+    RaftAppendResult & result, KeeperStorageBase::RequestsForSessions & requests_for_sessions, bool clear_requests_on_success)
 {
     if (!result->has_result())
         result->get();
@@ -755,7 +755,7 @@ int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
 {
     /// New session id allocation is a special request, because we cannot process it in normal
     /// way: get request -> put to raft -> set response for registered callback.
-    KeeperStorage::RequestForSession request_info;
+    KeeperStorageBase::RequestForSession request_info;
     std::shared_ptr<Coordination::ZooKeeperSessionIDRequest> request = std::make_shared<Coordination::ZooKeeperSessionIDRequest>();
     /// Internal session id. It's a temporary number which is unique for each client on this server
     /// but can be same on different servers.
@@ -26,7 +26,7 @@ using ZooKeeperResponseCallback = std::function<void(const Coordination::ZooKeep
 class KeeperDispatcher
 {
 private:
-    using RequestsQueue = ConcurrentBoundedQueue<KeeperStorage::RequestForSession>;
+    using RequestsQueue = ConcurrentBoundedQueue<KeeperStorageBase::RequestForSession>;
     using SessionToResponseCallback = std::unordered_map<int64_t, ZooKeeperResponseCallback>;
     using ClusterUpdateQueue = ConcurrentBoundedQueue<ClusterUpdateAction>;

@@ -95,18 +95,18 @@ private:

     /// Add error responses for requests to responses queue.
     /// Clears requests.
-    void addErrorResponses(const KeeperStorage::RequestsForSessions & requests_for_sessions, Coordination::Error error);
+    void addErrorResponses(const KeeperStorageBase::RequestsForSessions & requests_for_sessions, Coordination::Error error);

     /// Forcefully wait for result and sets errors if something when wrong.
     /// Clears both arguments
     nuraft::ptr<nuraft::buffer> forceWaitAndProcessResult(
-        RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions, bool clear_requests_on_success);
+        RaftAppendResult & result, KeeperStorageBase::RequestsForSessions & requests_for_sessions, bool clear_requests_on_success);

 public:
     std::mutex read_request_queue_mutex;

     /// queue of read requests that can be processed after a request with specific session ID and XID is committed
-    std::unordered_map<int64_t, std::unordered_map<Coordination::XID, KeeperStorage::RequestsForSessions>> read_request_queue;
+    std::unordered_map<int64_t, std::unordered_map<Coordination::XID, KeeperStorageBase::RequestsForSessions>> read_request_queue;

     /// Just allocate some objects, real initialization is done by `intialize method`
     KeeperDispatcher();
@@ -192,7 +192,7 @@ public:

     Keeper4LWInfo getKeeper4LWInfo() const;

-    const KeeperStateMachine & getStateMachine() const
+    const IKeeperStateMachine & getStateMachine() const
     {
         return *server->getKeeperStateMachine();
     }
@@ -123,7 +123,7 @@ KeeperServer::KeeperServer(
     SnapshotsQueue & snapshots_queue_,
     KeeperContextPtr keeper_context_,
     KeeperSnapshotManagerS3 & snapshot_manager_s3,
-    KeeperStateMachine::CommitCallback commit_callback)
+    IKeeperStateMachine::CommitCallback commit_callback)
     : server_id(configuration_and_settings_->server_id)
     , log(getLogger("KeeperServer"))
     , is_recovering(config.getBool("keeper_server.force_recovery", false))
@@ -134,13 +134,28 @@ KeeperServer::KeeperServer(
     if (keeper_context->getCoordinationSettings()->quorum_reads)
         LOG_WARNING(log, "Quorum reads enabled, Keeper will work slower.");

-    state_machine = nuraft::cs_new<KeeperStateMachine>(
-        responses_queue_,
-        snapshots_queue_,
-        keeper_context,
-        config.getBool("keeper_server.upload_snapshot_on_exit", false) ? &snapshot_manager_s3 : nullptr,
-        commit_callback,
-        checkAndGetSuperdigest(configuration_and_settings_->super_digest));
+#if USE_ROCKSDB
+    const auto & coordination_settings = keeper_context->getCoordinationSettings();
+    if (coordination_settings->experimental_use_rocksdb)
+    {
+        state_machine = nuraft::cs_new<KeeperStateMachine<KeeperRocksStorage>>(
+            responses_queue_,
+            snapshots_queue_,
+            keeper_context,
+            config.getBool("keeper_server.upload_snapshot_on_exit", false) ? &snapshot_manager_s3 : nullptr,
+            commit_callback,
+            checkAndGetSuperdigest(configuration_and_settings_->super_digest));
+        LOG_WARNING(log, "Use RocksDB as Keeper backend storage.");
+    }
+    else
+#endif
+        state_machine = nuraft::cs_new<KeeperStateMachine<KeeperMemoryStorage>>(
+            responses_queue_,
+            snapshots_queue_,
+            keeper_context,
+            config.getBool("keeper_server.upload_snapshot_on_exit", false) ? &snapshot_manager_s3 : nullptr,
+            commit_callback,
+            checkAndGetSuperdigest(configuration_and_settings_->super_digest));

     state_manager = nuraft::cs_new<KeeperStateManager>(
         server_id,
@@ -522,7 +537,7 @@ namespace
 {

 // Serialize the request for the log entry
-nuraft::ptr<nuraft::buffer> getZooKeeperLogEntry(const KeeperStorage::RequestForSession & request_for_session)
+nuraft::ptr<nuraft::buffer> getZooKeeperLogEntry(const KeeperStorageBase::RequestForSession & request_for_session)
 {
     DB::WriteBufferFromNuraftBuffer write_buf;
     DB::writeIntBinary(request_for_session.session_id, write_buf);
@@ -530,7 +545,7 @@ nuraft::ptr<nuraft::buffer> getZooKeeperLogEntry(const KeeperStorage::RequestFor
     DB::writeIntBinary(request_for_session.time, write_buf);
     /// we fill with dummy values to eliminate unnecessary copy later on when we will write correct values
     DB::writeIntBinary(static_cast<int64_t>(0), write_buf); /// zxid
-    DB::writeIntBinary(KeeperStorage::DigestVersion::NO_DIGEST, write_buf); /// digest version or NO_DIGEST flag
+    DB::writeIntBinary(KeeperStorageBase::DigestVersion::NO_DIGEST, write_buf); /// digest version or NO_DIGEST flag
     DB::writeIntBinary(static_cast<uint64_t>(0), write_buf); /// digest value
     /// if new fields are added, update KeeperStateMachine::ZooKeeperLogSerializationVersion along with parseRequest function and PreAppendLog callback handler
     return write_buf.getBuffer();
@@ -538,7 +553,7 @@ nuraft::ptr<nuraft::buffer> getZooKeeperLogEntry(const KeeperStorage::RequestFor

 }

-void KeeperServer::putLocalReadRequest(const KeeperStorage::RequestForSession & request_for_session)
+void KeeperServer::putLocalReadRequest(const KeeperStorageBase::RequestForSession & request_for_session)
 {
     if (!request_for_session.request->isReadRequest())
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot process non-read request locally");
@@ -546,7 +561,7 @@ void KeeperServer::putLocalReadRequest(const KeeperStorage::RequestForSession &
     state_machine->processReadRequest(request_for_session);
 }

-RaftAppendResult KeeperServer::putRequestBatch(const KeeperStorage::RequestsForSessions & requests_for_sessions)
+RaftAppendResult KeeperServer::putRequestBatch(const KeeperStorageBase::RequestsForSessions & requests_for_sessions)
 {
     std::vector<nuraft::ptr<nuraft::buffer>> entries;
     entries.reserve(requests_for_sessions.size());
@@ -789,7 +804,7 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ

     auto entry_buf = entry->get_buf_ptr();

-    KeeperStateMachine::ZooKeeperLogSerializationVersion serialization_version;
+    IKeeperStateMachine::ZooKeeperLogSerializationVersion serialization_version;
     auto request_for_session = state_machine->parseRequest(*entry_buf, /*final=*/false, &serialization_version);
     request_for_session->zxid = next_zxid;
     if (!state_machine->preprocess(*request_for_session))
@@ -799,10 +814,10 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ

     /// older versions of Keeper can send logs that are missing some fields
     size_t bytes_missing = 0;
-    if (serialization_version < KeeperStateMachine::ZooKeeperLogSerializationVersion::WITH_TIME)
+    if (serialization_version < IKeeperStateMachine::ZooKeeperLogSerializationVersion::WITH_TIME)
         bytes_missing += sizeof(request_for_session->time);

-    if (serialization_version < KeeperStateMachine::ZooKeeperLogSerializationVersion::WITH_ZXID_DIGEST)
+    if (serialization_version < IKeeperStateMachine::ZooKeeperLogSerializationVersion::WITH_ZXID_DIGEST)
         bytes_missing += sizeof(request_for_session->zxid) + sizeof(request_for_session->digest->version) + sizeof(request_for_session->digest->value);

     if (bytes_missing != 0)
@@ -816,19 +831,19 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ
     size_t write_buffer_header_size
         = sizeof(request_for_session->zxid) + sizeof(request_for_session->digest->version) + sizeof(request_for_session->digest->value);

-    if (serialization_version < KeeperStateMachine::ZooKeeperLogSerializationVersion::WITH_TIME)
+    if (serialization_version < IKeeperStateMachine::ZooKeeperLogSerializationVersion::WITH_TIME)
         write_buffer_header_size += sizeof(request_for_session->time);

     auto * buffer_start = reinterpret_cast<BufferBase::Position>(entry_buf->data_begin() + entry_buf->size() - write_buffer_header_size);

     WriteBufferFromPointer write_buf(buffer_start, write_buffer_header_size);

-    if (serialization_version < KeeperStateMachine::ZooKeeperLogSerializationVersion::WITH_TIME)
+    if (serialization_version < IKeeperStateMachine::ZooKeeperLogSerializationVersion::WITH_TIME)
         writeIntBinary(request_for_session->time, write_buf);

     writeIntBinary(request_for_session->zxid, write_buf);
     writeIntBinary(request_for_session->digest->version, write_buf);
-    if (request_for_session->digest->version != KeeperStorage::NO_DIGEST)
+    if (request_for_session->digest->version != KeeperStorageBase::NO_DIGEST)
         writeIntBinary(request_for_session->digest->value, write_buf);

     write_buf.finalize();
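The constructor above picks the storage backend at startup: both instantiations of the class template implement the same IKeeperStateMachine interface, so the rest of the server only sees the base type. A stripped-down sketch of the pattern (names simplified; not the real class layout):

    #include <memory>

    struct IStateMachineSketch          /// stands in for IKeeperStateMachine
    {
        virtual ~IStateMachineSketch() = default;
        virtual void commit() = 0;
    };

    template <typename Storage>         /// stands in for KeeperStateMachine<Storage>
    struct StateMachineSketch : IStateMachineSketch
    {
        Storage storage;
        void commit() override { /* apply the log entry to `storage` */ }
    };

    struct MemoryStorageSketch {};      /// stands in for KeeperMemoryStorage
    struct RocksStorageSketch {};       /// stands in for KeeperRocksStorage

    std::unique_ptr<IStateMachineSketch> makeStateMachine(bool use_rocksdb)
    {
        /// The runtime flag chooses the instantiation; callers keep holding
        /// the interface type, as KeeperServer does through
        /// nuraft::ptr<IKeeperStateMachine>.
        if (use_rocksdb)
            return std::make_unique<StateMachineSketch<RocksStorageSketch>>();
        return std::make_unique<StateMachineSketch<MemoryStorageSketch>>();
    }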
@ -24,7 +24,7 @@ class KeeperServer
private:
const int server_id;

nuraft::ptr<KeeperStateMachine> state_machine;
nuraft::ptr<IKeeperStateMachine> state_machine;

nuraft::ptr<KeeperStateManager> state_manager;

@ -79,26 +79,26 @@ public:
SnapshotsQueue & snapshots_queue_,
KeeperContextPtr keeper_context_,
KeeperSnapshotManagerS3 & snapshot_manager_s3,
KeeperStateMachine::CommitCallback commit_callback);
IKeeperStateMachine::CommitCallback commit_callback);

/// Load state machine from the latest snapshot and load log storage. Start NuRaft with required settings.
void startup(const Poco::Util::AbstractConfiguration & config, bool enable_ipv6 = true);

/// Put local read request and execute in state machine directly and response into
/// responses queue
void putLocalReadRequest(const KeeperStorage::RequestForSession & request);
void putLocalReadRequest(const KeeperStorageBase::RequestForSession & request);

bool isRecovering() const { return is_recovering; }
bool reconfigEnabled() const { return enable_reconfiguration; }

/// Put batch of requests into Raft and get result of put. Responses will be set separately into
/// responses_queue.
RaftAppendResult putRequestBatch(const KeeperStorage::RequestsForSessions & requests);
RaftAppendResult putRequestBatch(const KeeperStorageBase::RequestsForSessions & requests);

/// Return set of the non-active sessions
std::vector<int64_t> getDeadSessions();

nuraft::ptr<KeeperStateMachine> getKeeperStateMachine() const { return state_machine; }
nuraft::ptr<IKeeperStateMachine> getKeeperStateMachine() const { return state_machine; }

void forceRecovery();

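
After this hunk, KeeperServer holds the state machine only through the abstract IKeeperStateMachine interface, so the concrete Storage template parameter never leaks into the server. A hedged sketch of that ownership pattern (all names below are illustrative, not the real Keeper classes):

#include <memory>
#include <string>

struct IStateMachine
{
    virtual ~IStateMachine() = default;
    virtual std::string backendName() const = 0;
};

template <typename Storage>
struct StateMachine : IStateMachine
{
    std::string backendName() const override { return Storage::name; }
};

struct MemoryStorage { static constexpr const char * name = "memory"; };

struct Server
{
    std::shared_ptr<IStateMachine> state_machine;   // backend-agnostic handle
};

int main()
{
    Server server{std::make_shared<StateMachine<MemoryStorage>>()};
    return server.state_machine->backendName() == "memory" ? 0 : 1;
}
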
@ -66,7 +66,8 @@ namespace
return base;
}

void writeNode(const KeeperStorage::Node & node, SnapshotVersion version, WriteBuffer & out)
template<typename Node>
void writeNode(const Node & node, SnapshotVersion version, WriteBuffer & out)
{
writeBinary(node.getData(), out);

@ -86,7 +87,7 @@ namespace
writeBinary(node.aversion, out);
writeBinary(node.ephemeralOwner(), out);
if (version < SnapshotVersion::V6)
writeBinary(static_cast<int32_t>(node.data_size), out);
writeBinary(static_cast<int32_t>(node.getData().size()), out);
writeBinary(node.numChildren(), out);
writeBinary(node.pzxid, out);

@ -96,7 +97,8 @@ namespace
writeBinary(node.sizeInBytes(), out);
}

void readNode(KeeperStorage::Node & node, ReadBuffer & in, SnapshotVersion version, ACLMap & acl_map)
template<typename Node>
void readNode(Node & node, ReadBuffer & in, SnapshotVersion version, ACLMap & acl_map)
{
readVarUInt(node.data_size, in);
if (node.data_size != 0)
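
writeNode and readNode are now templates over the Node type: any node that exposes the accessors they touch serializes the same way, so the in-memory node and a RocksDB-backed node can share one function. A reduced sketch of the duck-typed idea (illustrative types, not the real Keeper nodes):

#include <cstdint>
#include <iostream>
#include <string>

template <typename Node>
void writeNode(const Node & node, std::ostream & out)
{
    const std::string & data = node.getData();
    uint32_t size = static_cast<uint32_t>(data.size());
    out.write(reinterpret_cast<const char *>(&size), sizeof(size));
    out.write(data.data(), data.size());
}

struct MemNode
{
    std::string data;
    const std::string & getData() const { return data; }
};

int main()
{
    MemNode node{"hello"};
    writeNode(node, std::cout);   // works for any type with a getData()
}
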
@ -195,7 +197,8 @@ namespace
}
}

void KeeperStorageSnapshot::serialize(const KeeperStorageSnapshot & snapshot, WriteBuffer & out, KeeperContextPtr keeper_context)
template<typename Storage>
void KeeperStorageSnapshot<Storage>::serialize(const KeeperStorageSnapshot<Storage> & snapshot, WriteBuffer & out, KeeperContextPtr keeper_context)
{
writeBinary(static_cast<uint8_t>(snapshot.version), out);
serializeSnapshotMetadata(snapshot.snapshot_meta, out);
@ -205,11 +208,11 @@ void KeeperStorageSnapshot::serialize(const KeeperStorageSnapshot & snapshot, Wr
writeBinary(snapshot.zxid, out);
if (keeper_context->digestEnabled())
{
writeBinary(static_cast<uint8_t>(KeeperStorage::CURRENT_DIGEST_VERSION), out);
writeBinary(static_cast<uint8_t>(Storage::CURRENT_DIGEST_VERSION), out);
writeBinary(snapshot.nodes_digest, out);
}
else
writeBinary(static_cast<uint8_t>(KeeperStorage::NO_DIGEST), out);
writeBinary(static_cast<uint8_t>(Storage::NO_DIGEST), out);
}

writeBinary(snapshot.session_id, out);
@ -255,7 +258,6 @@ void KeeperStorageSnapshot::serialize(const KeeperStorageSnapshot & snapshot, Wr
/// slightly bigger than required.
if (node.mzxid > snapshot.zxid)
break;

writeBinary(path, out);
writeNode(node, snapshot.version, out);

@ -282,7 +284,7 @@ void KeeperStorageSnapshot::serialize(const KeeperStorageSnapshot & snapshot, Wr
writeBinary(session_id, out);
writeBinary(timeout, out);

KeeperStorage::AuthIDs ids;
KeeperStorageBase::AuthIDs ids;
if (snapshot.session_and_auth.contains(session_id))
ids = snapshot.session_and_auth.at(session_id);

@ -303,7 +305,8 @@ void KeeperStorageSnapshot::serialize(const KeeperStorageSnapshot & snapshot, Wr
}
}

void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserialization_result, ReadBuffer & in, KeeperContextPtr keeper_context)
template<typename Storage>
void KeeperStorageSnapshot<Storage>::deserialize(SnapshotDeserializationResult<Storage> & deserialization_result, ReadBuffer & in, KeeperContextPtr keeper_context)
{
uint8_t version;
readBinary(version, in);
@ -312,7 +315,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Unsupported snapshot version {}", version);

deserialization_result.snapshot_meta = deserializeSnapshotMetadata(in);
KeeperStorage & storage = *deserialization_result.storage;
Storage & storage = *deserialization_result.storage;

bool recalculate_digest = keeper_context->digestEnabled();
if (version >= SnapshotVersion::V5)
@ -320,11 +323,11 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
readBinary(storage.zxid, in);
uint8_t digest_version;
readBinary(digest_version, in);
if (digest_version != KeeperStorage::DigestVersion::NO_DIGEST)
if (digest_version != Storage::DigestVersion::NO_DIGEST)
{
uint64_t nodes_digest;
readBinary(nodes_digest, in);
if (digest_version == KeeperStorage::CURRENT_DIGEST_VERSION)
if (digest_version == Storage::CURRENT_DIGEST_VERSION)
{
storage.nodes_digest = nodes_digest;
recalculate_digest = false;
@ -374,8 +377,8 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial

size_t snapshot_container_size;
readBinary(snapshot_container_size, in);

storage.container.reserve(snapshot_container_size);
if constexpr (!use_rocksdb)
storage.container.reserve(snapshot_container_size);

if (recalculate_digest)
storage.nodes_digest = 0;
@ -389,7 +392,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
in.readStrict(path_data.get(), path_size);
std::string_view path{path_data.get(), path_size};

KeeperStorage::Node node{};
typename Storage::Node node{};
readNode(node, in, current_version, storage.acl_map);

using enum Coordination::PathMatchResult;
@ -421,7 +424,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
if (keeper_context->ignoreSystemPathOnStartup() || keeper_context->getServerState() != KeeperContext::Phase::INIT)
{
LOG_ERROR(getLogger("KeeperSnapshotManager"), "{}. Ignoring it", get_error_msg());
node = KeeperStorage::Node{};
node = typename Storage::Node{};
}
else
throw Exception(
@ -433,8 +436,9 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
}

auto ephemeral_owner = node.ephemeralOwner();
if (!node.isEphemeral() && node.numChildren() > 0)
node.getChildren().reserve(node.numChildren());
if constexpr (!use_rocksdb)
if (!node.isEphemeral() && node.numChildren() > 0)
node.getChildren().reserve(node.numChildren());

if (ephemeral_owner != 0)
storage.ephemerals[node.ephemeralOwner()].insert(std::string{path});
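
Note the spelling `typename Storage::Node node{};` in the hunks above: inside a template, Storage::Node is a dependent name, so without the typename keyword the parser must assume it names a value rather than a type. A minimal illustration:

template <typename Storage>
void makeNode()
{
    typename Storage::Node node{};   // "typename" tells the parser this is a type
    (void)node;
}

struct MemoryStorage { struct Node { int version = 0; }; };

int main()
{
    makeNode<MemoryStorage>();
}
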
@ -447,36 +451,38 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial

LOG_TRACE(getLogger("KeeperSnapshotManager"), "Building structure for children nodes");

for (const auto & itr : storage.container)
if constexpr (!use_rocksdb)
{
if (itr.key != "/")
for (const auto & itr : storage.container)
{
auto parent_path = parentNodePath(itr.key);
storage.container.updateValue(
parent_path, [path = itr.key](KeeperStorage::Node & value) { value.addChild(getBaseNodeName(path)); });
}
}

for (const auto & itr : storage.container)
{
if (itr.key != "/")
{
if (itr.value.numChildren() != static_cast<int32_t>(itr.value.getChildren().size()))
if (itr.key != "/")
{
auto parent_path = parentNodePath(itr.key);
storage.container.updateValue(
parent_path, [path = itr.key](typename Storage::Node & value) { value.addChild(getBaseNodeName(path)); });
}
}

for (const auto & itr : storage.container)
{
if (itr.key != "/")
{
if (itr.value.numChildren() != static_cast<int32_t>(itr.value.getChildren().size()))
{
#ifdef NDEBUG
/// TODO (alesapin) remove this, it should be always CORRUPTED_DATA.
LOG_ERROR(getLogger("KeeperSnapshotManager"), "Children counter in stat.numChildren {}"
" is different from actual children size {} for node {}", itr.value.numChildren(), itr.value.getChildren().size(), itr.key);
/// TODO (alesapin) remove this, it should be always CORRUPTED_DATA.
LOG_ERROR(getLogger("KeeperSnapshotManager"), "Children counter in stat.numChildren {}"
" is different from actual children size {} for node {}", itr.value.numChildren(), itr.value.getChildren().size(), itr.key);
#else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Children counter in stat.numChildren {}"
" is different from actual children size {} for node {}",
itr.value.numChildren(), itr.value.getChildren().size(), itr.key);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Children counter in stat.numChildren {}"
" is different from actual children size {} for node {}",
itr.value.numChildren(), itr.value.getChildren().size(), itr.key);
#endif
}
}
}
}


size_t active_sessions_size;
readBinary(active_sessions_size, in);

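
The rebuild of the in-memory child lists above is now wrapped in `if constexpr (!use_rocksdb)`. Because the discarded branch of if constexpr is never instantiated, a storage type that lacks the touched members still compiles. A self-contained sketch of that mechanism (illustrative names):

#include <iostream>

template <typename Storage>
void rebuildIndex(Storage & storage)
{
    if constexpr (Storage::use_rocksdb)
        std::cout << "rocksdb: child index kept by the storage engine\n";
    else
        storage.rebuildChildren();   // only instantiated for in-memory storage
}

struct MemoryStorage
{
    static constexpr bool use_rocksdb = false;
    void rebuildChildren() { std::cout << "rebuilding in-memory child lists\n"; }
};

struct RocksStorage
{
    static constexpr bool use_rocksdb = true;
    // note: no rebuildChildren() at all -- if constexpr makes that legal
};

int main()
{
    MemoryStorage mem;
    RocksStorage rocks;
    rebuildIndex(mem);
    rebuildIndex(rocks);
}
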
@ -493,14 +499,14 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
size_t session_auths_size;
readBinary(session_auths_size, in);

KeeperStorage::AuthIDs ids;
typename Storage::AuthIDs ids;
size_t session_auth_counter = 0;
while (session_auth_counter < session_auths_size)
{
String scheme, id;
readBinary(scheme, in);
readBinary(id, in);
ids.emplace_back(KeeperStorage::AuthID{scheme, id});
ids.emplace_back(typename Storage::AuthID{scheme, id});

session_auth_counter++;
}
@ -523,7 +529,8 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
}
}

KeeperStorageSnapshot::KeeperStorageSnapshot(KeeperStorage * storage_, uint64_t up_to_log_idx_, const ClusterConfigPtr & cluster_config_)
template<typename Storage>
KeeperStorageSnapshot<Storage>::KeeperStorageSnapshot(Storage * storage_, uint64_t up_to_log_idx_, const ClusterConfigPtr & cluster_config_)
: storage(storage_)
, snapshot_meta(std::make_shared<SnapshotMetadata>(up_to_log_idx_, 0, std::make_shared<nuraft::cluster_config>()))
, session_id(storage->session_id_counter)
@ -540,8 +547,9 @@ KeeperStorageSnapshot::KeeperStorageSnapshot(KeeperStorage * storage_, uint64_t
session_and_auth = storage->session_and_auth;
}

KeeperStorageSnapshot::KeeperStorageSnapshot(
KeeperStorage * storage_, const SnapshotMetadataPtr & snapshot_meta_, const ClusterConfigPtr & cluster_config_)
template<typename Storage>
KeeperStorageSnapshot<Storage>::KeeperStorageSnapshot(
Storage * storage_, const SnapshotMetadataPtr & snapshot_meta_, const ClusterConfigPtr & cluster_config_)
: storage(storage_)
, snapshot_meta(snapshot_meta_)
, session_id(storage->session_id_counter)
@ -558,12 +566,14 @@ KeeperStorageSnapshot::KeeperStorageSnapshot(
session_and_auth = storage->session_and_auth;
}

KeeperStorageSnapshot::~KeeperStorageSnapshot()
template<typename Storage>
KeeperStorageSnapshot<Storage>::~KeeperStorageSnapshot()
{
storage->disableSnapshotMode();
}

KeeperSnapshotManager::KeeperSnapshotManager(
template<typename Storage>
KeeperSnapshotManager<Storage>::KeeperSnapshotManager(
size_t snapshots_to_keep_,
const KeeperContextPtr & keeper_context_,
bool compress_snapshots_zstd_,
@ -651,7 +661,8 @@ KeeperSnapshotManager::KeeperSnapshotManager(
moveSnapshotsIfNeeded();
}

SnapshotFileInfoPtr KeeperSnapshotManager::serializeSnapshotBufferToDisk(nuraft::buffer & buffer, uint64_t up_to_log_idx)
template<typename Storage>
SnapshotFileInfoPtr KeeperSnapshotManager<Storage>::serializeSnapshotBufferToDisk(nuraft::buffer & buffer, uint64_t up_to_log_idx)
{
ReadBufferFromNuraftBuffer reader(buffer);

@ -680,7 +691,8 @@ SnapshotFileInfoPtr KeeperSnapshotManager::serializeSnapshotBufferToDisk(nuraft:
return snapshot_file_info;
}

nuraft::ptr<nuraft::buffer> KeeperSnapshotManager::deserializeLatestSnapshotBufferFromDisk()
template<typename Storage>
nuraft::ptr<nuraft::buffer> KeeperSnapshotManager<Storage>::deserializeLatestSnapshotBufferFromDisk()
{
while (!existing_snapshots.empty())
{
@ -701,7 +713,8 @@ nuraft::ptr<nuraft::buffer> KeeperSnapshotManager::deserializeLatestSnapshotBuff
return nullptr;
}

nuraft::ptr<nuraft::buffer> KeeperSnapshotManager::deserializeSnapshotBufferFromDisk(uint64_t up_to_log_idx) const
template<typename Storage>
nuraft::ptr<nuraft::buffer> KeeperSnapshotManager<Storage>::deserializeSnapshotBufferFromDisk(uint64_t up_to_log_idx) const
{
const auto & [snapshot_path, snapshot_disk, size] = *existing_snapshots.at(up_to_log_idx);
WriteBufferFromNuraftBuffer writer;
@ -710,7 +723,8 @@ nuraft::ptr<nuraft::buffer> KeeperSnapshotManager::deserializeSnapshotBufferFrom
return writer.getBuffer();
}

nuraft::ptr<nuraft::buffer> KeeperSnapshotManager::serializeSnapshotToBuffer(const KeeperStorageSnapshot & snapshot) const
template<typename Storage>
nuraft::ptr<nuraft::buffer> KeeperSnapshotManager<Storage>::serializeSnapshotToBuffer(const KeeperStorageSnapshot<Storage> & snapshot) const
{
std::unique_ptr<WriteBufferFromNuraftBuffer> writer = std::make_unique<WriteBufferFromNuraftBuffer>();
auto * buffer_raw_ptr = writer.get();
@ -720,13 +734,13 @@ nuraft::ptr<nuraft::buffer> KeeperSnapshotManager::serializeSnapshotToBuffer(con
else
compressed_writer = std::make_unique<CompressedWriteBuffer>(*writer);

KeeperStorageSnapshot::serialize(snapshot, *compressed_writer, keeper_context);
KeeperStorageSnapshot<Storage>::serialize(snapshot, *compressed_writer, keeper_context);
compressed_writer->finalize();
return buffer_raw_ptr->getBuffer();
}


bool KeeperSnapshotManager::isZstdCompressed(nuraft::ptr<nuraft::buffer> buffer)
template<typename Storage>
bool KeeperSnapshotManager<Storage>::isZstdCompressed(nuraft::ptr<nuraft::buffer> buffer)
{
static constexpr unsigned char ZSTD_COMPRESSED_MAGIC[4] = {0x28, 0xB5, 0x2F, 0xFD};

@ -737,7 +751,8 @@ bool KeeperSnapshotManager::isZstdCompressed(nuraft::ptr<nuraft::buffer> buffer)
return memcmp(magic_from_buffer, ZSTD_COMPRESSED_MAGIC, 4) == 0;
}

SnapshotDeserializationResult KeeperSnapshotManager::deserializeSnapshotFromBuffer(nuraft::ptr<nuraft::buffer> buffer) const
template<typename Storage>
SnapshotDeserializationResult<Storage> KeeperSnapshotManager<Storage>::deserializeSnapshotFromBuffer(nuraft::ptr<nuraft::buffer> buffer) const
{
bool is_zstd_compressed = isZstdCompressed(buffer);

@ -749,14 +764,15 @@ SnapshotDeserializationResult KeeperSnapshotManager::deserializeSnapshotFromBuff
else
compressed_reader = std::make_unique<CompressedReadBuffer>(*reader);

SnapshotDeserializationResult result;
result.storage = std::make_unique<KeeperStorage>(storage_tick_time, superdigest, keeper_context, /* initialize_system_nodes */ false);
KeeperStorageSnapshot::deserialize(result, *compressed_reader, keeper_context);
SnapshotDeserializationResult<Storage> result;
result.storage = std::make_unique<Storage>(storage_tick_time, superdigest, keeper_context, /* initialize_system_nodes */ false);
KeeperStorageSnapshot<Storage>::deserialize(result, *compressed_reader, keeper_context);
result.storage->initializeSystemNodes();
return result;
}

SnapshotDeserializationResult KeeperSnapshotManager::restoreFromLatestSnapshot()
template<typename Storage>
SnapshotDeserializationResult<Storage> KeeperSnapshotManager<Storage>::restoreFromLatestSnapshot()
{
if (existing_snapshots.empty())
return {};
@ -767,23 +783,27 @@ SnapshotDeserializationResult KeeperSnapshotManager::restoreFromLatestSnapshot()
return deserializeSnapshotFromBuffer(buffer);
}

DiskPtr KeeperSnapshotManager::getDisk() const
template<typename Storage>
DiskPtr KeeperSnapshotManager<Storage>::getDisk() const
{
return keeper_context->getSnapshotDisk();
}

DiskPtr KeeperSnapshotManager::getLatestSnapshotDisk() const
template<typename Storage>
DiskPtr KeeperSnapshotManager<Storage>::getLatestSnapshotDisk() const
{
return keeper_context->getLatestSnapshotDisk();
}

void KeeperSnapshotManager::removeOutdatedSnapshotsIfNeeded()
template<typename Storage>
void KeeperSnapshotManager<Storage>::removeOutdatedSnapshotsIfNeeded()
{
while (existing_snapshots.size() > snapshots_to_keep)
removeSnapshot(existing_snapshots.begin()->first);
}

void KeeperSnapshotManager::moveSnapshotsIfNeeded()
template<typename Storage>
void KeeperSnapshotManager<Storage>::moveSnapshotsIfNeeded()
{
/// move snapshots to correct disks

@ -813,7 +833,8 @@ void KeeperSnapshotManager::moveSnapshotsIfNeeded()

}

void KeeperSnapshotManager::removeSnapshot(uint64_t log_idx)
template<typename Storage>
void KeeperSnapshotManager<Storage>::removeSnapshot(uint64_t log_idx)
{
auto itr = existing_snapshots.find(log_idx);
if (itr == existing_snapshots.end())
@ -823,7 +844,8 @@ void KeeperSnapshotManager::removeSnapshot(uint64_t log_idx)
existing_snapshots.erase(itr);
}

SnapshotFileInfoPtr KeeperSnapshotManager::serializeSnapshotToDisk(const KeeperStorageSnapshot & snapshot)
template<typename Storage>
SnapshotFileInfoPtr KeeperSnapshotManager<Storage>::serializeSnapshotToDisk(const KeeperStorageSnapshot<Storage> & snapshot)
{
auto up_to_log_idx = snapshot.snapshot_meta->get_last_log_idx();
auto snapshot_file_name = getSnapshotFileName(up_to_log_idx, compress_snapshots_zstd);
@ -842,7 +864,7 @@ SnapshotFileInfoPtr KeeperSnapshotManager::serializeSnapshotToDisk(const KeeperS
else
compressed_writer = std::make_unique<CompressedWriteBuffer>(*writer);

KeeperStorageSnapshot::serialize(snapshot, *compressed_writer, keeper_context);
KeeperStorageSnapshot<Storage>::serialize(snapshot, *compressed_writer, keeper_context);
compressed_writer->finalize();
compressed_writer->sync();

@ -864,14 +886,16 @@ SnapshotFileInfoPtr KeeperSnapshotManager::serializeSnapshotToDisk(const KeeperS
return snapshot_file_info;
}

size_t KeeperSnapshotManager::getLatestSnapshotIndex() const
template<typename Storage>
size_t KeeperSnapshotManager<Storage>::getLatestSnapshotIndex() const
{
if (!existing_snapshots.empty())
return existing_snapshots.rbegin()->first;
return 0;
}

SnapshotFileInfoPtr KeeperSnapshotManager::getLatestSnapshotInfo() const
template<typename Storage>
SnapshotFileInfoPtr KeeperSnapshotManager<Storage>::getLatestSnapshotInfo() const
{
if (!existing_snapshots.empty())
{
@ -890,4 +914,10 @@ SnapshotFileInfoPtr KeeperSnapshotManager::getLatestSnapshotInfo() const
return nullptr;
}

template struct KeeperStorageSnapshot<KeeperMemoryStorage>;
template class KeeperSnapshotManager<KeeperMemoryStorage>;
#if USE_ROCKSDB
template struct KeeperStorageSnapshot<KeeperRocksStorage>;
template class KeeperSnapshotManager<KeeperRocksStorage>;
#endif
}
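
The `template struct ...;` and `template class ...;` lines at the end of the file above are explicit instantiation definitions: they let the template member definitions stay in the .cpp while emitting code for exactly the storage types Keeper supports. A reduced, compilable version of the pattern (illustrative names):

template <typename Storage>
class SnapshotManager
{
public:
    int snapshotsKept() const { return kept; }
private:
    int kept = 3;
};

struct KeeperMemoryStorage {};

// Emits SnapshotManager<KeeperMemoryStorage> here, once, instead of in every
// translation unit that would otherwise need the definitions in a header.
template class SnapshotManager<KeeperMemoryStorage>;

int main()
{
    SnapshotManager<KeeperMemoryStorage> manager;
    return manager.snapshotsKept() == 3 ? 0 : 1;
}
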
@ -34,10 +34,11 @@ enum SnapshotVersion : uint8_t
static constexpr auto CURRENT_SNAPSHOT_VERSION = SnapshotVersion::V6;

/// What is stored in binary snapshot
template<typename Storage>
struct SnapshotDeserializationResult
{
/// Storage
KeeperStoragePtr storage;
std::unique_ptr<Storage> storage;
/// Snapshot metadata (up_to_log_idx and so on)
SnapshotMetadataPtr snapshot_meta;
/// Cluster config
@ -52,21 +53,31 @@ struct SnapshotDeserializationResult
///
/// This representation of snapshot have to be serialized into NuRaft
/// buffer and send over network or saved to file.
template<typename Storage>
struct KeeperStorageSnapshot
{
#if USE_ROCKSDB
static constexpr bool use_rocksdb = std::is_same_v<Storage, KeeperRocksStorage>;
#else
static constexpr bool use_rocksdb = false;
#endif

public:
KeeperStorageSnapshot(KeeperStorage * storage_, uint64_t up_to_log_idx_, const ClusterConfigPtr & cluster_config_ = nullptr);
KeeperStorageSnapshot(Storage * storage_, uint64_t up_to_log_idx_, const ClusterConfigPtr & cluster_config_ = nullptr);

KeeperStorageSnapshot(
KeeperStorage * storage_, const SnapshotMetadataPtr & snapshot_meta_, const ClusterConfigPtr & cluster_config_ = nullptr);
Storage * storage_, const SnapshotMetadataPtr & snapshot_meta_, const ClusterConfigPtr & cluster_config_ = nullptr);

KeeperStorageSnapshot(const KeeperStorageSnapshot<Storage>&) = delete;
KeeperStorageSnapshot(KeeperStorageSnapshot<Storage>&&) = default;

~KeeperStorageSnapshot();

static void serialize(const KeeperStorageSnapshot & snapshot, WriteBuffer & out, KeeperContextPtr keeper_context);
static void serialize(const KeeperStorageSnapshot<Storage> & snapshot, WriteBuffer & out, KeeperContextPtr keeper_context);

static void deserialize(SnapshotDeserializationResult & deserialization_result, ReadBuffer & in, KeeperContextPtr keeper_context);
static void deserialize(SnapshotDeserializationResult<Storage> & deserialization_result, ReadBuffer & in, KeeperContextPtr keeper_context);

KeeperStorage * storage;
Storage * storage;

SnapshotVersion version = CURRENT_SNAPSHOT_VERSION;
/// Snapshot metadata
@ -77,11 +88,11 @@ public:
/// so we have for loop for (i = 0; i < snapshot_container_size; ++i) { doSmth(begin + i); }
size_t snapshot_container_size;
/// Iterator to the start of the storage
KeeperStorage::Container::const_iterator begin;
Storage::Container::const_iterator begin;
/// Active sessions and their timeouts
SessionAndTimeout session_and_timeout;
/// Sessions credentials
KeeperStorage::SessionAndAuth session_and_auth;
Storage::SessionAndAuth session_and_auth;
/// ACLs cache for better performance. Without we cannot deserialize storage.
std::unordered_map<uint64_t, Coordination::ACLs> acl_map;
/// Cluster config from snapshot, can be empty
@ -105,14 +116,16 @@ struct SnapshotFileInfo
};

using SnapshotFileInfoPtr = std::shared_ptr<SnapshotFileInfo>;

using KeeperStorageSnapshotPtr = std::shared_ptr<KeeperStorageSnapshot>;
using CreateSnapshotCallback = std::function<std::shared_ptr<SnapshotFileInfo>(KeeperStorageSnapshotPtr &&, bool)>;

using SnapshotMetaAndStorage = std::pair<SnapshotMetadataPtr, KeeperStoragePtr>;
#if USE_ROCKSDB
using KeeperStorageSnapshotPtr = std::variant<std::shared_ptr<KeeperStorageSnapshot<KeeperMemoryStorage>>, std::shared_ptr<KeeperStorageSnapshot<KeeperRocksStorage>>>;
#else
using KeeperStorageSnapshotPtr = std::variant<std::shared_ptr<KeeperStorageSnapshot<KeeperMemoryStorage>>>;
#endif
using CreateSnapshotCallback = std::function<SnapshotFileInfoPtr(KeeperStorageSnapshotPtr &&, bool)>;

/// Class responsible for snapshots serialization and deserialization. Each snapshot
/// has it's path on disk and log index.
template<typename Storage>
class KeeperSnapshotManager
{
public:
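
KeeperStorageSnapshotPtr becomes a std::variant over the per-backend shared_ptr types, so a single queue and callback signature can carry snapshots of either storage. A hedged sketch of how such a variant is produced and consumed (illustrative types):

#include <iostream>
#include <memory>
#include <variant>

struct MemSnapshot  { const char * name() const { return "memory"; } };
struct RockSnapshot { const char * name() const { return "rocksdb"; } };

using SnapshotPtr = std::variant<std::shared_ptr<MemSnapshot>, std::shared_ptr<RockSnapshot>>;

int main()
{
    SnapshotPtr snapshot = std::make_shared<MemSnapshot>();
    // a generic consumer dispatches on whatever alternative is held:
    std::visit([](const auto & ptr) { std::cout << ptr->name() << '\n'; }, snapshot);

    // a type-aware consumer can unwrap the exact alternative instead:
    auto mem = std::get<std::shared_ptr<MemSnapshot>>(std::move(snapshot));
    std::cout << mem->name() << '\n';
}
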
@ -124,18 +137,18 @@ public:
size_t storage_tick_time_ = 500);

/// Restore storage from latest available snapshot
SnapshotDeserializationResult restoreFromLatestSnapshot();
SnapshotDeserializationResult<Storage> restoreFromLatestSnapshot();

/// Compress snapshot and serialize it to buffer
nuraft::ptr<nuraft::buffer> serializeSnapshotToBuffer(const KeeperStorageSnapshot & snapshot) const;
nuraft::ptr<nuraft::buffer> serializeSnapshotToBuffer(const KeeperStorageSnapshot<Storage> & snapshot) const;

/// Serialize already compressed snapshot to disk (return path)
SnapshotFileInfoPtr serializeSnapshotBufferToDisk(nuraft::buffer & buffer, uint64_t up_to_log_idx);

/// Serialize snapshot directly to disk
SnapshotFileInfoPtr serializeSnapshotToDisk(const KeeperStorageSnapshot & snapshot);
SnapshotFileInfoPtr serializeSnapshotToDisk(const KeeperStorageSnapshot<Storage> & snapshot);

SnapshotDeserializationResult deserializeSnapshotFromBuffer(nuraft::ptr<nuraft::buffer> buffer) const;
SnapshotDeserializationResult<Storage> deserializeSnapshotFromBuffer(nuraft::ptr<nuraft::buffer> buffer) const;

/// Deserialize snapshot with log index up_to_log_idx from disk into compressed nuraft buffer.
nuraft::ptr<nuraft::buffer> deserializeSnapshotBufferFromDisk(uint64_t up_to_log_idx) const;

@ -44,7 +44,7 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}

KeeperStateMachine::KeeperStateMachine(
IKeeperStateMachine::IKeeperStateMachine(
ResponsesQueue & responses_queue_,
SnapshotsQueue & snapshots_queue_,
const KeeperContextPtr & keeper_context_,
@ -52,12 +52,6 @@ KeeperStateMachine::KeeperStateMachine(
CommitCallback commit_callback_,
const std::string & superdigest_)
: commit_callback(commit_callback_)
, snapshot_manager(
keeper_context_->getCoordinationSettings()->snapshots_to_keep,
keeper_context_,
keeper_context_->getCoordinationSettings()->compress_snapshots_with_zstd_format,
superdigest_,
keeper_context_->getCoordinationSettings()->dead_session_check_period_ms.totalMilliseconds())
, responses_queue(responses_queue_)
, snapshots_queue(snapshots_queue_)
, min_request_size_to_cache(keeper_context_->getCoordinationSettings()->min_request_size_for_cache)
@ -68,6 +62,32 @@ KeeperStateMachine::KeeperStateMachine(
{
}

template<typename Storage>
KeeperStateMachine<Storage>::KeeperStateMachine(
ResponsesQueue & responses_queue_,
SnapshotsQueue & snapshots_queue_,
// const CoordinationSettingsPtr & coordination_settings_,
const KeeperContextPtr & keeper_context_,
KeeperSnapshotManagerS3 * snapshot_manager_s3_,
IKeeperStateMachine::CommitCallback commit_callback_,
const std::string & superdigest_)
: IKeeperStateMachine(
responses_queue_,
snapshots_queue_,
/// coordination_settings_,
keeper_context_,
snapshot_manager_s3_,
commit_callback_,
superdigest_),
snapshot_manager(
keeper_context_->getCoordinationSettings()->snapshots_to_keep,
keeper_context_,
keeper_context_->getCoordinationSettings()->compress_snapshots_with_zstd_format,
superdigest_,
keeper_context_->getCoordinationSettings()->dead_session_check_period_ms.totalMilliseconds())
{
}

namespace
{

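
The new constructor above delegates the backend-independent members to the IKeeperStateMachine base and initializes only the storage-specific snapshot_manager itself. A minimal sketch of that base/derived constructor split (illustrative names):

#include <string>

struct IMachine
{
    explicit IMachine(std::string digest) : superdigest(std::move(digest)) {}
    virtual ~IMachine() = default;
    std::string superdigest;            // shared state lives in the base
};

template <typename Storage>
struct Machine : IMachine
{
    explicit Machine(std::string digest)
        : IMachine(std::move(digest))   // forward common args to the base
        , snapshots_to_keep(3)          // storage-specific member init
    {
    }
    int snapshots_to_keep;
};

struct MemoryStorage {};

int main()
{
    Machine<MemoryStorage> machine("digest");
    return machine.superdigest == "digest" ? 0 : 1;
}
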
@ -78,7 +98,8 @@ bool isLocalDisk(const IDisk & disk)

}

void KeeperStateMachine::init()
template<typename Storage>
void KeeperStateMachine<Storage>::init()
{
/// Do everything without mutexes, no other threads exist.
LOG_DEBUG(log, "Totally have {} snapshots", snapshot_manager.totalSnapshots());
@ -123,7 +144,7 @@ void KeeperStateMachine::init()
LOG_DEBUG(log, "No existing snapshots, last committed log index {}", last_committed_idx);

if (!storage)
storage = std::make_unique<KeeperStorage>(
storage = std::make_unique<Storage>(
keeper_context->getCoordinationSettings()->dead_session_check_period_ms.totalMilliseconds(), superdigest, keeper_context);
}

@ -131,13 +152,13 @@ namespace
{

void assertDigest(
const KeeperStorage::Digest & expected,
const KeeperStorage::Digest & actual,
const KeeperStorageBase::Digest & expected,
const KeeperStorageBase::Digest & actual,
const Coordination::ZooKeeperRequest & request,
uint64_t log_idx,
bool committing)
{
if (!KeeperStorage::checkDigest(expected, actual))
if (!KeeperStorageBase::checkDigest(expected, actual))
{
LOG_FATAL(
getLogger("KeeperStateMachine"),
@ -170,7 +191,8 @@ struct TSA_SCOPED_LOCKABLE LockGuardWithStats final

}

nuraft::ptr<nuraft::buffer> KeeperStateMachine::pre_commit(uint64_t log_idx, nuraft::buffer & data)
template<typename Storage>
nuraft::ptr<nuraft::buffer> KeeperStateMachine<Storage>::pre_commit(uint64_t log_idx, nuraft::buffer & data)
{
auto result = nuraft::buffer::alloc(sizeof(log_idx));
nuraft::buffer_serializer ss(result);
@ -191,10 +213,10 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::pre_commit(uint64_t log_idx, nur
return result;
}

std::shared_ptr<KeeperStorage::RequestForSession> KeeperStateMachine::parseRequest(nuraft::buffer & data, bool final, ZooKeeperLogSerializationVersion * serialization_version)
std::shared_ptr<KeeperStorageBase::RequestForSession> IKeeperStateMachine::parseRequest(nuraft::buffer & data, bool final, ZooKeeperLogSerializationVersion * serialization_version)
{
ReadBufferFromNuraftBuffer buffer(data);
auto request_for_session = std::make_shared<KeeperStorage::RequestForSession>();
auto request_for_session = std::make_shared<KeeperStorageBase::RequestForSession>();
readIntBinary(request_for_session->session_id, buffer);

int32_t length;
@ -267,7 +289,7 @@ std::shared_ptr<KeeperStorage::RequestForSession> KeeperStateMachine::parseReque

request_for_session->digest.emplace();
readIntBinary(request_for_session->digest->version, buffer);
if (request_for_session->digest->version != KeeperStorage::DigestVersion::NO_DIGEST || !buffer.eof())
if (request_for_session->digest->version != KeeperStorageBase::DigestVersion::NO_DIGEST || !buffer.eof())
readIntBinary(request_for_session->digest->value, buffer);
}

@ -283,7 +305,8 @@ std::shared_ptr<KeeperStorage::RequestForSession> KeeperStateMachine::parseReque
return request_for_session;
}

bool KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & request_for_session)
template<typename Storage>
bool KeeperStateMachine<Storage>::preprocess(const KeeperStorageBase::RequestForSession & request_for_session)
{
const auto op_num = request_for_session.request->getOpNum();
if (op_num == Coordination::OpNum::SessionID || op_num == Coordination::OpNum::Reconfig)
@ -317,10 +340,11 @@ bool KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & req
return true;
}

void KeeperStateMachine::reconfigure(const KeeperStorage::RequestForSession& request_for_session)
template<typename Storage>
void KeeperStateMachine<Storage>::reconfigure(const KeeperStorageBase::RequestForSession& request_for_session)
{
LockGuardWithStats lock(storage_and_responses_lock);
KeeperStorage::ResponseForSession response = processReconfiguration(request_for_session);
KeeperStorageBase::ResponseForSession response = processReconfiguration(request_for_session);
if (!responses_queue.push(response))
{
ProfileEvents::increment(ProfileEvents::KeeperCommitsFailed);
@ -330,8 +354,9 @@ void KeeperStateMachine::reconfigure(const KeeperStorage::RequestForSession& req
}
}

KeeperStorage::ResponseForSession KeeperStateMachine::processReconfiguration(
const KeeperStorage::RequestForSession & request_for_session)
template<typename Storage>
KeeperStorageBase::ResponseForSession KeeperStateMachine<Storage>::processReconfiguration(
const KeeperStorageBase::RequestForSession & request_for_session)
{
ProfileEvents::increment(ProfileEvents::KeeperReconfigRequest);

@ -340,7 +365,7 @@ KeeperStorage::ResponseForSession KeeperStateMachine::processReconfiguration(
const int64_t zxid = request_for_session.zxid;

using enum Coordination::Error;
auto bad_request = [&](Coordination::Error code = ZBADARGUMENTS) -> KeeperStorage::ResponseForSession
auto bad_request = [&](Coordination::Error code = ZBADARGUMENTS) -> KeeperStorageBase::ResponseForSession
{
auto res = std::make_shared<Coordination::ZooKeeperReconfigResponse>();
res->xid = request.xid;
@ -397,7 +422,8 @@ KeeperStorage::ResponseForSession KeeperStateMachine::processReconfiguration(
return { session_id, std::move(response) };
}

nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, nuraft::buffer & data)
template<typename Storage>
nuraft::ptr<nuraft::buffer> KeeperStateMachine<Storage>::commit(const uint64_t log_idx, nuraft::buffer & data)
{
auto request_for_session = parseRequest(data, true);
if (!request_for_session->zxid)
@ -408,7 +434,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
if (!keeper_context->localLogsPreprocessed() && !preprocess(*request_for_session))
return nullptr;

auto try_push = [&](const KeeperStorage::ResponseForSession & response)
auto try_push = [&](const KeeperStorageBase::ResponseForSession & response)
{
if (!responses_queue.push(response))
{
@ -430,7 +456,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
std::shared_ptr<Coordination::ZooKeeperSessionIDResponse> response = std::make_shared<Coordination::ZooKeeperSessionIDResponse>();
response->internal_id = session_id_request.internal_id;
response->server_id = session_id_request.server_id;
KeeperStorage::ResponseForSession response_for_session;
KeeperStorageBase::ResponseForSession response_for_session;
response_for_session.session_id = -1;
response_for_session.response = response;
response_for_session.request = request_for_session->request;
@ -451,7 +477,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
}

LockGuardWithStats lock(storage_and_responses_lock);
KeeperStorage::ResponsesForSessions responses_for_sessions
KeeperStorageBase::ResponsesForSessions responses_for_sessions
= storage->processRequest(request_for_session->request, request_for_session->session_id, request_for_session->zxid);

for (auto & response_for_session : responses_for_sessions)
@ -482,7 +508,8 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
return nullptr;
}

bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s)
template<typename Storage>
bool KeeperStateMachine<Storage>::apply_snapshot(nuraft::snapshot & s)
{
LOG_DEBUG(log, "Applying snapshot {}", s.get_last_log_idx());
nuraft::ptr<nuraft::buffer> latest_snapshot_ptr;
@ -509,7 +536,7 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s)
{ /// deserialize and apply snapshot to storage
LockGuardWithStats lock(storage_and_responses_lock);

SnapshotDeserializationResult snapshot_deserialization_result;
SnapshotDeserializationResult<Storage> snapshot_deserialization_result;
if (latest_snapshot_ptr)
snapshot_deserialization_result = snapshot_manager.deserializeSnapshotFromBuffer(latest_snapshot_ptr);
else
@ -530,7 +557,7 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s)
}


void KeeperStateMachine::commit_config(const uint64_t log_idx, nuraft::ptr<nuraft::cluster_config> & new_conf)
void IKeeperStateMachine::commit_config(const uint64_t log_idx, nuraft::ptr<nuraft::cluster_config> & new_conf)
{
std::lock_guard lock(cluster_config_lock);
auto tmp = new_conf->serialize();
@ -538,7 +565,7 @@ void KeeperStateMachine::commit_config(const uint64_t log_idx, nuraf
keeper_context->setLastCommitIndex(log_idx);
}

void KeeperStateMachine::rollback(uint64_t log_idx, nuraft::buffer & data)
void IKeeperStateMachine::rollback(uint64_t log_idx, nuraft::buffer & data)
{
/// Don't rollback anything until the first commit because nothing was preprocessed
if (!keeper_context->localLogsPreprocessed())
@ -554,7 +581,8 @@ void KeeperStateMachine::rollback(uint64_t log_idx, nuraft::buffer & data)
rollbackRequest(*request_for_session, false);
}

void KeeperStateMachine::rollbackRequest(const KeeperStorage::RequestForSession & request_for_session, bool allow_missing)
template<typename Storage>
void KeeperStateMachine<Storage>::rollbackRequest(const KeeperStorageBase::RequestForSession & request_for_session, bool allow_missing)
{
if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
return;
@ -563,7 +591,8 @@ void KeeperStateMachine::rollbackRequest(const KeeperStorage::RequestForSession
storage->rollbackRequest(request_for_session.zxid, allow_missing);
}

void KeeperStateMachine::rollbackRequestNoLock(const KeeperStorage::RequestForSession & request_for_session, bool allow_missing)
template<typename Storage>
void KeeperStateMachine<Storage>::rollbackRequestNoLock(const KeeperStorageBase::RequestForSession & request_for_session, bool allow_missing)
{
if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
return;
@ -571,14 +600,15 @@ void KeeperStateMachine::rollbackRequestNoLock(const KeeperStorage::RequestForSe
storage->rollbackRequest(request_for_session.zxid, allow_missing);
}

nuraft::ptr<nuraft::snapshot> KeeperStateMachine::last_snapshot()
nuraft::ptr<nuraft::snapshot> IKeeperStateMachine::last_snapshot()
{
/// Just return the latest snapshot.
std::lock_guard lock(snapshots_lock);
return latest_snapshot_meta;
}

void KeeperStateMachine::create_snapshot(nuraft::snapshot & s, nuraft::async_result<bool>::handler_type & when_done)
template<typename Storage>
void KeeperStateMachine<Storage>::create_snapshot(nuraft::snapshot & s, nuraft::async_result<bool>::handler_type & when_done)
{
LOG_DEBUG(log, "Creating snapshot {}", s.get_last_log_idx());

@ -587,14 +617,15 @@ void KeeperStateMachine::create_snapshot(nuraft::snapshot & s, nuraft::async_res
CreateSnapshotTask snapshot_task;
{ /// lock storage for a short period time to turn on "snapshot mode". After that we can read consistent storage state without locking.
LockGuardWithStats lock(storage_and_responses_lock);
snapshot_task.snapshot = std::make_shared<KeeperStorageSnapshot>(storage.get(), snapshot_meta_copy, getClusterConfig());
snapshot_task.snapshot = std::make_shared<KeeperStorageSnapshot<Storage>>(storage.get(), snapshot_meta_copy, getClusterConfig());
}

/// create snapshot task for background execution (in snapshot thread)
snapshot_task.create_snapshot = [this, when_done](KeeperStorageSnapshotPtr && snapshot, bool execute_only_cleanup)
snapshot_task.create_snapshot = [this, when_done](KeeperStorageSnapshotPtr && snapshot_, bool execute_only_cleanup)
{
nuraft::ptr<std::exception> exception(nullptr);
bool ret = false;
auto && snapshot = std::get<std::shared_ptr<KeeperStorageSnapshot<Storage>>>(std::move(snapshot_));
if (!execute_only_cleanup)
{
try
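
Inside the create_snapshot callback above, the variant is unwrapped with std::get: KeeperStateMachine<Storage> knows which alternative it put into the task, so it can move the matching shared_ptr back out. A reduced illustration with hypothetical snapshot types:

#include <cassert>
#include <memory>
#include <utility>
#include <variant>

struct SnapshotA {};
struct SnapshotB {};
using SnapshotVar = std::variant<std::shared_ptr<SnapshotA>, std::shared_ptr<SnapshotB>>;

int main()
{
    SnapshotVar var = std::make_shared<SnapshotA>();
    // moving out of the variant leaves the shared_ptr alternative empty
    auto snapshot = std::get<std::shared_ptr<SnapshotA>>(std::move(var));
    assert(snapshot != nullptr);
    // std::get would throw std::bad_variant_access for the wrong alternative
    return 0;
}
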
@ -683,7 +714,8 @@ void KeeperStateMachine::create_snapshot(nuraft::snapshot & s, nuraft::async_res
LOG_WARNING(log, "Cannot push snapshot task into queue");
}

void KeeperStateMachine::save_logical_snp_obj(
template<typename Storage>
void KeeperStateMachine<Storage>::save_logical_snp_obj(
nuraft::snapshot & s, uint64_t & obj_id, nuraft::buffer & data, bool /*is_first_obj*/, bool /*is_last_obj*/)
{
LOG_DEBUG(log, "Saving snapshot {} obj_id {}", s.get_last_log_idx(), obj_id);
@ -748,7 +780,7 @@ static int bufferFromFile(LoggerPtr log, const std::string & path, nuraft::ptr<n
return 0;
}

int KeeperStateMachine::read_logical_snp_obj(
int IKeeperStateMachine::read_logical_snp_obj(
nuraft::snapshot & s, void *& /*user_snp_ctx*/, uint64_t obj_id, nuraft::ptr<nuraft::buffer> & data_out, bool & is_last_obj)
{
LOG_DEBUG(log, "Reading snapshot {} obj_id {}", s.get_last_log_idx(), obj_id);
@ -788,7 +820,8 @@ int KeeperStateMachine::read_logical_snp_obj(
return 1;
}

void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSession & request_for_session)
template<typename Storage>
void KeeperStateMachine<Storage>::processReadRequest(const KeeperStorageBase::RequestForSession & request_for_session)
{
/// Pure local request, just process it with storage
LockGuardWithStats lock(storage_and_responses_lock);
@ -804,103 +837,120 @@ void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSessi
}
}

void KeeperStateMachine::shutdownStorage()
template<typename Storage>
void KeeperStateMachine<Storage>::shutdownStorage()
{
LockGuardWithStats lock(storage_and_responses_lock);
storage->finalize();
}

std::vector<int64_t> KeeperStateMachine::getDeadSessions()
template<typename Storage>
std::vector<int64_t> KeeperStateMachine<Storage>::getDeadSessions()
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getDeadSessions();
}

int64_t KeeperStateMachine::getNextZxid() const
template<typename Storage>
int64_t KeeperStateMachine<Storage>::getNextZxid() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getNextZXID();
}

KeeperStorage::Digest KeeperStateMachine::getNodesDigest() const
template<typename Storage>
KeeperStorageBase::Digest KeeperStateMachine<Storage>::getNodesDigest() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getNodesDigest(false);
}

uint64_t KeeperStateMachine::getLastProcessedZxid() const
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getLastProcessedZxid() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getZXID();
}

uint64_t KeeperStateMachine::getNodesCount() const
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getNodesCount() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getNodesCount();
}

uint64_t KeeperStateMachine::getTotalWatchesCount() const
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getTotalWatchesCount() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getTotalWatchesCount();
}

uint64_t KeeperStateMachine::getWatchedPathsCount() const
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getWatchedPathsCount() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getWatchedPathsCount();
}

uint64_t KeeperStateMachine::getSessionsWithWatchesCount() const
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getSessionsWithWatchesCount() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getSessionsWithWatchesCount();
}

uint64_t KeeperStateMachine::getTotalEphemeralNodesCount() const
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getTotalEphemeralNodesCount() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getTotalEphemeralNodesCount();
}

uint64_t KeeperStateMachine::getSessionWithEphemeralNodesCount() const
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getSessionWithEphemeralNodesCount() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getSessionWithEphemeralNodesCount();
}

void KeeperStateMachine::dumpWatches(WriteBufferFromOwnString & buf) const
template<typename Storage>
void KeeperStateMachine<Storage>::dumpWatches(WriteBufferFromOwnString & buf) const
{
LockGuardWithStats lock(storage_and_responses_lock);
storage->dumpWatches(buf);
}

void KeeperStateMachine::dumpWatchesByPath(WriteBufferFromOwnString & buf) const
template<typename Storage>
void KeeperStateMachine<Storage>::dumpWatchesByPath(WriteBufferFromOwnString & buf) const
{
LockGuardWithStats lock(storage_and_responses_lock);
storage->dumpWatchesByPath(buf);
}

void KeeperStateMachine::dumpSessionsAndEphemerals(WriteBufferFromOwnString & buf) const
template<typename Storage>
void KeeperStateMachine<Storage>::dumpSessionsAndEphemerals(WriteBufferFromOwnString & buf) const
{
LockGuardWithStats lock(storage_and_responses_lock);
storage->dumpSessionsAndEphemerals(buf);
}

uint64_t KeeperStateMachine::getApproximateDataSize() const
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getApproximateDataSize() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getApproximateDataSize();
}

uint64_t KeeperStateMachine::getKeyArenaSize() const
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getKeyArenaSize() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getArenaDataSize();
}

uint64_t KeeperStateMachine::getLatestSnapshotSize() const
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getLatestSnapshotSize() const
{
auto snapshot_info = [&]
{
@ -923,7 +973,7 @@ uint64_t KeeperStateMachine::getLatestSnapshotSize() const
return size;
}

ClusterConfigPtr KeeperStateMachine::getClusterConfig() const
ClusterConfigPtr IKeeperStateMachine::getClusterConfig() const
{
std::lock_guard lock(cluster_config_lock);
if (cluster_config)
@ -935,11 +985,18 @@ ClusterConfigPtr KeeperStateMachine::getClusterConfig() const
return nullptr;
}

void KeeperStateMachine::recalculateStorageStats()
template<typename Storage>
void KeeperStateMachine<Storage>::recalculateStorageStats()
{
LockGuardWithStats lock(storage_and_responses_lock);
LOG_INFO(log, "Recalculating storage stats");
storage->recalculateStats();
LOG_INFO(log, "Done recalculating storage stats");
}

template class KeeperStateMachine<KeeperMemoryStorage>;
#if USE_ROCKSDB
template class KeeperStateMachine<KeeperRocksStorage>;
#endif

}
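
Every accessor in the long run of methods above follows the same discipline: acquire storage_and_responses_lock, then delegate to storage. A reduced sketch of that locking pattern, approximating LockGuardWithStats with std::lock_guard (illustrative members):

#include <cstdint>
#include <mutex>
#include <vector>

struct Storage
{
    std::vector<int64_t> dead_sessions;
    uint64_t nodes = 0;
};

class Machine
{
public:
    uint64_t getNodesCount() const
    {
        std::lock_guard lock(storage_lock);   // every accessor locks first
        return storage.nodes;
    }

    std::vector<int64_t> getDeadSessions() const
    {
        std::lock_guard lock(storage_lock);
        return storage.dead_sessions;         // copy out under the lock
    }

private:
    mutable std::mutex storage_lock;
    Storage storage;
};

int main()
{
    Machine machine;
    return machine.getNodesCount() == 0 ? 0 : 1;
}
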
|
@ -11,26 +11,24 @@
|
||||
|
||||
namespace DB
|
||||
{
|
||||
using ResponsesQueue = ConcurrentBoundedQueue<KeeperStorage::ResponseForSession>;
|
||||
using ResponsesQueue = ConcurrentBoundedQueue<KeeperStorageBase::ResponseForSession>;
|
||||
using SnapshotsQueue = ConcurrentBoundedQueue<CreateSnapshotTask>;
|
||||
|
||||
/// ClickHouse Keeper state machine. Wrapper for KeeperStorage.
|
||||
/// Responsible for entries commit, snapshots creation and so on.
|
||||
class KeeperStateMachine : public nuraft::state_machine
|
||||
class IKeeperStateMachine : public nuraft::state_machine
|
||||
{
|
||||
public:
|
||||
using CommitCallback = std::function<void(uint64_t, const KeeperStorage::RequestForSession &)>;
|
||||
using CommitCallback = std::function<void(uint64_t, const KeeperStorageBase::RequestForSession &)>;
|
||||
|
||||
KeeperStateMachine(
|
||||
IKeeperStateMachine(
|
||||
ResponsesQueue & responses_queue_,
|
||||
SnapshotsQueue & snapshots_queue_,
|
||||
const KeeperContextPtr & keeper_context_,
|
||||
KeeperSnapshotManagerS3 * snapshot_manager_s3_,
|
||||
CommitCallback commit_callback_ = {},
|
||||
const std::string & superdigest_ = "");
|
||||
CommitCallback commit_callback_,
|
||||
const std::string & superdigest_);
|
||||
|
||||
/// Read state from the latest snapshot
|
||||
void init();
|
||||
virtual void init() = 0;
|
||||
|
||||
enum ZooKeeperLogSerializationVersion
|
||||
{
|
||||
@ -47,89 +45,66 @@ public:
|
||||
///
|
||||
/// final - whether it's the final time we will fetch the request so we can safely remove it from cache
|
||||
/// serialization_version - information about which fields were parsed from the buffer so we can modify the buffer accordingly
|
||||
std::shared_ptr<KeeperStorage::RequestForSession> parseRequest(nuraft::buffer & data, bool final, ZooKeeperLogSerializationVersion * serialization_version = nullptr);
|
||||
std::shared_ptr<KeeperStorageBase::RequestForSession> parseRequest(nuraft::buffer & data, bool final, ZooKeeperLogSerializationVersion * serialization_version = nullptr);
|
||||
|
||||
bool preprocess(const KeeperStorage::RequestForSession & request_for_session);
|
||||
virtual bool preprocess(const KeeperStorageBase::RequestForSession & request_for_session) = 0;
|
||||
|
||||
nuraft::ptr<nuraft::buffer> pre_commit(uint64_t log_idx, nuraft::buffer & data) override;
|
||||
|
||||
nuraft::ptr<nuraft::buffer> commit(const uint64_t log_idx, nuraft::buffer & data) override; /// NOLINT
|
||||
|
||||
/// Save new cluster config to our snapshot (copy of the config stored in StateManager)
|
||||
void commit_config(const uint64_t log_idx, nuraft::ptr<nuraft::cluster_config> & new_conf) override; /// NOLINT
|
||||
|
||||
void rollback(uint64_t log_idx, nuraft::buffer & data) override;
|
||||
|
||||
// allow_missing - whether the transaction we want to rollback can be missing from storage
|
||||
// (can happen in case of exception during preprocessing)
|
||||
void rollbackRequest(const KeeperStorage::RequestForSession & request_for_session, bool allow_missing);
|
||||
|
||||
void rollbackRequestNoLock(
|
||||
const KeeperStorage::RequestForSession & request_for_session,
|
||||
bool allow_missing) TSA_NO_THREAD_SAFETY_ANALYSIS;
|
||||
virtual void rollbackRequest(const KeeperStorageBase::RequestForSession & request_for_session, bool allow_missing) = 0;

    uint64_t last_commit_index() override { return keeper_context->lastCommittedIndex(); }

    /// Apply preliminarily saved (save_logical_snp_obj) snapshot to our state.
    bool apply_snapshot(nuraft::snapshot & s) override;

    nuraft::ptr<nuraft::snapshot> last_snapshot() override;

    /// Create new snapshot from current state.
    void create_snapshot(nuraft::snapshot & s, nuraft::async_result<bool>::handler_type & when_done) override;
    void create_snapshot(nuraft::snapshot & s, nuraft::async_result<bool>::handler_type & when_done) override = 0;

    /// Save snapshot which was sent by leader to us. After that we will apply it in apply_snapshot.
    void save_logical_snp_obj(nuraft::snapshot & s, uint64_t & obj_id, nuraft::buffer & data, bool is_first_obj, bool is_last_obj) override;
    void save_logical_snp_obj(nuraft::snapshot & s, uint64_t & obj_id, nuraft::buffer & data, bool is_first_obj, bool is_last_obj) override = 0;

    /// Better name is `serialize snapshot` -- save existing snapshot (created by create_snapshot) into
    /// in-memory buffer data_out.
    int read_logical_snp_obj(
        nuraft::snapshot & s, void *& user_snp_ctx, uint64_t obj_id, nuraft::ptr<nuraft::buffer> & data_out, bool & is_last_obj) override;

    // This should be used only for tests or keeper-data-dumper because it violates
    // TSA -- we can't acquire the lock outside of this class or return a storage under lock
    // in a reasonable way.
    KeeperStorage & getStorageUnsafe() TSA_NO_THREAD_SAFETY_ANALYSIS
    {
        return *storage;
    }

    void shutdownStorage();
    virtual void shutdownStorage() = 0;

    ClusterConfigPtr getClusterConfig() const;

    /// Process local read request
    void processReadRequest(const KeeperStorage::RequestForSession & request_for_session);
    virtual void processReadRequest(const KeeperStorageBase::RequestForSession & request_for_session) = 0;

    std::vector<int64_t> getDeadSessions();
    virtual std::vector<int64_t> getDeadSessions() = 0;

    int64_t getNextZxid() const;
    virtual int64_t getNextZxid() const = 0;

    KeeperStorage::Digest getNodesDigest() const;
    virtual KeeperStorageBase::Digest getNodesDigest() const = 0;

    /// Introspection functions for 4lw commands
    uint64_t getLastProcessedZxid() const;
    virtual uint64_t getLastProcessedZxid() const = 0;

    uint64_t getNodesCount() const;
    uint64_t getTotalWatchesCount() const;
    uint64_t getWatchedPathsCount() const;
    uint64_t getSessionsWithWatchesCount() const;
    virtual uint64_t getNodesCount() const = 0;
    virtual uint64_t getTotalWatchesCount() const = 0;
    virtual uint64_t getWatchedPathsCount() const = 0;
    virtual uint64_t getSessionsWithWatchesCount() const = 0;

    void dumpWatches(WriteBufferFromOwnString & buf) const;
    void dumpWatchesByPath(WriteBufferFromOwnString & buf) const;
    void dumpSessionsAndEphemerals(WriteBufferFromOwnString & buf) const;
    virtual void dumpWatches(WriteBufferFromOwnString & buf) const = 0;
    virtual void dumpWatchesByPath(WriteBufferFromOwnString & buf) const = 0;
    virtual void dumpSessionsAndEphemerals(WriteBufferFromOwnString & buf) const = 0;

    uint64_t getSessionWithEphemeralNodesCount() const;
    uint64_t getTotalEphemeralNodesCount() const;
    uint64_t getApproximateDataSize() const;
    uint64_t getKeyArenaSize() const;
    uint64_t getLatestSnapshotSize() const;
    virtual uint64_t getSessionWithEphemeralNodesCount() const = 0;
    virtual uint64_t getTotalEphemeralNodesCount() const = 0;
    virtual uint64_t getApproximateDataSize() const = 0;
    virtual uint64_t getKeyArenaSize() const = 0;
    virtual uint64_t getLatestSnapshotSize() const = 0;

    void recalculateStorageStats();
    virtual void recalculateStorageStats() = 0;

    void reconfigure(const KeeperStorage::RequestForSession& request_for_session);
    virtual void reconfigure(const KeeperStorageBase::RequestForSession& request_for_session) = 0;

private:
protected:
    CommitCallback commit_callback;
    /// In our state machine we always have a single snapshot which is stored
    /// in memory in compressed (serialized) format.
@@ -137,12 +112,9 @@ private:
    std::shared_ptr<SnapshotFileInfo> latest_snapshot_info;
    nuraft::ptr<nuraft::buffer> latest_snapshot_buf = nullptr;

    /// Main state machine logic
    KeeperStoragePtr storage TSA_PT_GUARDED_BY(storage_and_responses_lock);
    CoordinationSettingsPtr coordination_settings;

    /// Save/Load and Serialize/Deserialize logic for snapshots.
    KeeperSnapshotManager snapshot_manager;

    /// Put processed responses into this queue
    ResponsesQueue & responses_queue;

@@ -159,7 +131,7 @@ private:
    /// for request.
    mutable std::mutex storage_and_responses_lock;

    std::unordered_map<int64_t, std::unordered_map<Coordination::XID, std::shared_ptr<KeeperStorage::RequestForSession>>> parsed_request_cache;
    std::unordered_map<int64_t, std::unordered_map<Coordination::XID, std::shared_ptr<KeeperStorageBase::RequestForSession>>> parsed_request_cache;
    uint64_t min_request_size_to_cache{0};
    /// we only need to protect the access to the map itself
    /// requests can be modified from anywhere without lock because a single request

@@ -181,7 +153,104 @@ private:
    KeeperSnapshotManagerS3 * snapshot_manager_s3;

    KeeperStorage::ResponseForSession processReconfiguration(const KeeperStorage::RequestForSession & request_for_session)
        TSA_REQUIRES(storage_and_responses_lock);
    virtual KeeperStorageBase::ResponseForSession processReconfiguration(
        const KeeperStorageBase::RequestForSession& request_for_session)
        TSA_REQUIRES(storage_and_responses_lock) = 0;
};

/// ClickHouse Keeper state machine. Wrapper for KeeperStorage.
/// Responsible for entries commit, snapshots creation and so on.
template<typename Storage>
class KeeperStateMachine : public IKeeperStateMachine
{
public:
    /// using CommitCallback = std::function<void(uint64_t, const KeeperStorage::RequestForSession &)>;

    KeeperStateMachine(
        ResponsesQueue & responses_queue_,
        SnapshotsQueue & snapshots_queue_,
        /// const CoordinationSettingsPtr & coordination_settings_,
        const KeeperContextPtr & keeper_context_,
        KeeperSnapshotManagerS3 * snapshot_manager_s3_,
        CommitCallback commit_callback_ = {},
        const std::string & superdigest_ = "");

    /// Read state from the latest snapshot
    void init() override;

    bool preprocess(const KeeperStorageBase::RequestForSession & request_for_session) override;

    nuraft::ptr<nuraft::buffer> pre_commit(uint64_t log_idx, nuraft::buffer & data) override;

    nuraft::ptr<nuraft::buffer> commit(const uint64_t log_idx, nuraft::buffer & data) override; /// NOLINT

    // allow_missing - whether the transaction we want to rollback can be missing from storage
    // (can happen in case of exception during preprocessing)
    void rollbackRequest(const KeeperStorageBase::RequestForSession & request_for_session, bool allow_missing) override;

    void rollbackRequestNoLock(
        const KeeperStorageBase::RequestForSession & request_for_session,
        bool allow_missing) TSA_NO_THREAD_SAFETY_ANALYSIS;

    /// Apply preliminarily saved (save_logical_snp_obj) snapshot to our state.
    bool apply_snapshot(nuraft::snapshot & s) override;

    /// Create new snapshot from current state.
    void create_snapshot(nuraft::snapshot & s, nuraft::async_result<bool>::handler_type & when_done) override;

    /// Save snapshot which was sent by leader to us. After that we will apply it in apply_snapshot.
    void save_logical_snp_obj(nuraft::snapshot & s, uint64_t & obj_id, nuraft::buffer & data, bool is_first_obj, bool is_last_obj) override;

    // This should be used only for tests or keeper-data-dumper because it violates
    // TSA -- we can't acquire the lock outside of this class or return a storage under lock
    // in a reasonable way.
    Storage & getStorageUnsafe() TSA_NO_THREAD_SAFETY_ANALYSIS
    {
        return *storage;
    }

    void shutdownStorage() override;

    /// Process local read request
    void processReadRequest(const KeeperStorageBase::RequestForSession & request_for_session) override;

    std::vector<int64_t> getDeadSessions() override;

    int64_t getNextZxid() const override;

    KeeperStorageBase::Digest getNodesDigest() const override;

    /// Introspection functions for 4lw commands
    uint64_t getLastProcessedZxid() const override;

    uint64_t getNodesCount() const override;
    uint64_t getTotalWatchesCount() const override;
    uint64_t getWatchedPathsCount() const override;
    uint64_t getSessionsWithWatchesCount() const override;

    void dumpWatches(WriteBufferFromOwnString & buf) const override;
    void dumpWatchesByPath(WriteBufferFromOwnString & buf) const override;
    void dumpSessionsAndEphemerals(WriteBufferFromOwnString & buf) const override;

    uint64_t getSessionWithEphemeralNodesCount() const override;
    uint64_t getTotalEphemeralNodesCount() const override;
    uint64_t getApproximateDataSize() const override;
    uint64_t getKeyArenaSize() const override;
    uint64_t getLatestSnapshotSize() const override;

    void recalculateStorageStats() override;

    void reconfigure(const KeeperStorageBase::RequestForSession& request_for_session) override;

private:
    /// Main state machine logic
    std::unique_ptr<Storage> storage; //TSA_PT_GUARDED_BY(storage_and_responses_lock);

    /// Save/Load and Serialize/Deserialize logic for snapshots.
    KeeperSnapshotManager<Storage> snapshot_manager;

    KeeperStorageBase::ResponseForSession processReconfiguration(const KeeperStorageBase::RequestForSession & request_for_session)
        TSA_REQUIRES(storage_and_responses_lock) override;
};

}
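
A minimal usage sketch (not part of the commit; the factory function and its exact signature are assumptions) showing how the interface/template split above can be consumed -- callers hold an IKeeperStateMachine pointer and pick the storage backend once, at construction time:

/// Hypothetical factory: only IKeeperStateMachine, KeeperStateMachine<Storage>,
/// KeeperMemoryStorage and KeeperRocksStorage come from the diff above.
std::shared_ptr<IKeeperStateMachine> makeStateMachine(
    ResponsesQueue & responses_queue,
    SnapshotsQueue & snapshots_queue,
    const KeeperContextPtr & keeper_context,
    KeeperSnapshotManagerS3 * snapshot_manager_s3,
    bool use_rocksdb_storage)
{
#if USE_ROCKSDB
    if (use_rocksdb_storage)
        return std::make_shared<KeeperStateMachine<KeeperRocksStorage>>(
            responses_queue, snapshots_queue, keeper_context, snapshot_manager_s3);
#endif
    return std::make_shared<KeeperStateMachine<KeeperMemoryStorage>>(
        responses_queue, snapshots_queue, keeper_context, snapshot_manager_s3);
}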

File diff suppressed because it is too large
@@ -8,188 +8,384 @@

#include <absl/container/flat_hash_set.h>

#include "config.h"
#if USE_ROCKSDB
#include <Coordination/RocksDBContainer.h>
#endif

namespace DB
{

class KeeperContext;
using KeeperContextPtr = std::shared_ptr<KeeperContext>;

struct KeeperStorageRequestProcessor;
using KeeperStorageRequestProcessorPtr = std::shared_ptr<KeeperStorageRequestProcessor>;
using ResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr &)>;
using ChildrenSet = absl::flat_hash_set<StringRef, StringRefHash>;
using SessionAndTimeout = std::unordered_map<int64_t, int64_t>;

struct KeeperStorageSnapshot;

/// Keeper state machine almost equal to the ZooKeeper's state machine.
/// Implements all logic of operations, data changes, sessions allocation.
/// In-memory and not thread safe.
class KeeperStorage
/// KeeperRocksNodeInfo is used in RocksDB keeper.
/// It is serialized directly as POD to RocksDB.
struct KeeperRocksNodeInfo
{
public:
    /// Node should have as minimal size as possible to reduce memory footprint
    /// of stored nodes
    /// New fields should be added to the struct only if it's really necessary
    struct Node
    int64_t czxid{0};
    int64_t mzxid{0};
    int64_t pzxid{0};
    uint64_t acl_id = 0; /// 0 -- no ACL by default

    int64_t mtime{0};

    int32_t version{0};
    int32_t cversion{0};
    int32_t aversion{0};

    int32_t seq_num = 0;
    mutable UInt64 digest = 0; /// we cached digest for this node.

    /// as ctime can't be negative because it stores the timestamp when the
    /// node was created, we can use the MSB for a bool
    struct
    {
        int64_t czxid{0};
        int64_t mzxid{0};
        int64_t pzxid{0};
        uint64_t acl_id = 0; /// 0 -- no ACL by default
        bool is_ephemeral : 1;
        int64_t ctime : 63;
    } is_ephemeral_and_ctime{false, 0};

    int64_t mtime{0};

    std::unique_ptr<char[]> data{nullptr};
    uint32_t data_size{0};

    int32_t version{0};
    int32_t cversion{0};
    int32_t aversion{0};

    mutable uint64_t cached_digest = 0;

    Node() = default;

    Node & operator=(const Node & other);
    Node(const Node & other);

    Node & operator=(Node && other) noexcept;
    Node(Node && other) noexcept;

    bool empty() const;

    bool isEphemeral() const
    {
        return is_ephemeral_and_ctime.is_ephemeral;
    }

    int64_t ephemeralOwner() const
    {
        if (isEphemeral())
            return ephemeral_or_children_data.ephemeral_owner;

        return 0;
    }

    void setEphemeralOwner(int64_t ephemeral_owner)
    {
        is_ephemeral_and_ctime.is_ephemeral = ephemeral_owner != 0;
        ephemeral_or_children_data.ephemeral_owner = ephemeral_owner;
    }

    int32_t numChildren() const
    {
        if (isEphemeral())
            return 0;

        return ephemeral_or_children_data.children_info.num_children;
    }

    void setNumChildren(int32_t num_children)
    {
        ephemeral_or_children_data.children_info.num_children = num_children;
    }

    void increaseNumChildren()
    {
        chassert(!isEphemeral());
        ++ephemeral_or_children_data.children_info.num_children;
    }

    void decreaseNumChildren()
    {
        chassert(!isEphemeral());
        --ephemeral_or_children_data.children_info.num_children;
    }

    int32_t seqNum() const
    {
        if (isEphemeral())
            return 0;

        return ephemeral_or_children_data.children_info.seq_num;
    }

    void setSeqNum(int32_t seq_num)
    {
        ephemeral_or_children_data.children_info.seq_num = seq_num;
    }

    void increaseSeqNum()
    {
        chassert(!isEphemeral());
        ++ephemeral_or_children_data.children_info.seq_num;
    }

    int64_t ctime() const
    {
        return is_ephemeral_and_ctime.ctime;
    }

    void setCtime(uint64_t ctime)
    {
        is_ephemeral_and_ctime.ctime = ctime;
    }

    void copyStats(const Coordination::Stat & stat);

    void setResponseStat(Coordination::Stat & response_stat) const;

    /// Object memory size
    uint64_t sizeInBytes() const;

    void setData(const String & new_data);

    std::string_view getData() const noexcept { return {data.get(), data_size}; }

    void addChild(StringRef child_path);

    void removeChild(StringRef child_path);

    const auto & getChildren() const noexcept { return children; }
    auto & getChildren() { return children; }

    // Invalidate the calculated digest so it's recalculated again on the next
    // getDigest call
    void invalidateDigestCache() const;

    // get the calculated digest of the node
    UInt64 getDigest(std::string_view path) const;

    // copy only necessary information for preprocessing and digest calculation
    // (e.g. we don't need to copy list of children)
    void shallowCopy(const Node & other);
private:
    /// as ctime can't be negative because it stores the timestamp when the
    /// node was created, we can use the MSB for a bool
    /// ephemeral nodes cannot have children so a node can set either
    /// ephemeral_owner OR seq_num + num_children
    union
    {
        int64_t ephemeral_owner;
        struct
        {
            bool is_ephemeral : 1;
            int64_t ctime : 63;
        } is_ephemeral_and_ctime{false, 0};
            int32_t seq_num;
            int32_t num_children;
        } children_info;
    } ephemeral_or_children_data{0};

    /// ephemeral nodes cannot have children so a node can set either
    /// ephemeral_owner OR seq_num + num_children
    union
    {
        int64_t ephemeral_owner;
        struct
        {
            int32_t seq_num;
            int32_t num_children;
        } children_info;
    } ephemeral_or_children_data{0};
    bool isEphemeral() const
    {
        return is_ephemeral_and_ctime.is_ephemeral;
    }

    ChildrenSet children{};
};
    int64_t ephemeralOwner() const
    {
        if (isEphemeral())
            return ephemeral_or_children_data.ephemeral_owner;

#if !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER)
static_assert(
    sizeof(ListNode<Node>) <= 144,
    "std::list node containing ListNode<Node> is > 160 bytes (sizeof(ListNode<Node>) + 16 bytes for pointers) which will increase "
    "memory consumption");
        return 0;
    }

    void setEphemeralOwner(int64_t ephemeral_owner)
    {
        is_ephemeral_and_ctime.is_ephemeral = ephemeral_owner != 0;
        ephemeral_or_children_data.ephemeral_owner = ephemeral_owner;
    }

    int32_t numChildren() const
    {
        if (isEphemeral())
            return 0;

        return ephemeral_or_children_data.children_info.num_children;
    }

    void setNumChildren(int32_t num_children)
    {
        ephemeral_or_children_data.children_info.num_children = num_children;
    }

    /// dummy interface for test
    void addChild(StringRef) {}
    auto getChildren() const
    {
        return std::vector<int>(numChildren());
    }

    void increaseNumChildren()
    {
        chassert(!isEphemeral());
        ++ephemeral_or_children_data.children_info.num_children;
    }

    void decreaseNumChildren()
    {
        chassert(!isEphemeral());
        --ephemeral_or_children_data.children_info.num_children;
    }

    int32_t seqNum() const
    {
        if (isEphemeral())
            return 0;

        return ephemeral_or_children_data.children_info.seq_num;
    }

    void setSeqNum(int32_t seq_num_)
    {
        ephemeral_or_children_data.children_info.seq_num = seq_num_;
    }

    void increaseSeqNum()
    {
        chassert(!isEphemeral());
        ++ephemeral_or_children_data.children_info.seq_num;
    }

    int64_t ctime() const
    {
        return is_ephemeral_and_ctime.ctime;
    }

    void setCtime(uint64_t ctime)
    {
        is_ephemeral_and_ctime.ctime = ctime;
    }

    void copyStats(const Coordination::Stat & stat);
};

/// KeeperRocksNode is the memory structure used by RocksDB
struct KeeperRocksNode : public KeeperRocksNodeInfo
{
#if USE_ROCKSDB
    friend struct RocksDBContainer<KeeperRocksNode>;
#endif
    using Meta = KeeperRocksNodeInfo;

    uint64_t size_bytes = 0; // only for compatibility, should be deprecated

    uint64_t sizeInBytes() const { return data_size + sizeof(KeeperRocksNodeInfo); }
    void setData(String new_data)
    {
        data_size = static_cast<uint32_t>(new_data.size());
        if (data_size != 0)
        {
            data = std::unique_ptr<char[]>(new char[new_data.size()]);
            memcpy(data.get(), new_data.data(), data_size);
        }
    }

    void shallowCopy(const KeeperRocksNode & other)
    {
        czxid = other.czxid;
        mzxid = other.mzxid;
        pzxid = other.pzxid;
        acl_id = other.acl_id; /// 0 -- no ACL by default

        mtime = other.mtime;

        is_ephemeral_and_ctime = other.is_ephemeral_and_ctime;

        ephemeral_or_children_data = other.ephemeral_or_children_data;

        data_size = other.data_size;
        if (data_size != 0)
        {
            data = std::unique_ptr<char[]>(new char[data_size]);
            memcpy(data.get(), other.data.get(), data_size);
        }

        version = other.version;
        cversion = other.cversion;
        aversion = other.aversion;

        /// cached_digest = other.cached_digest;
    }
    void invalidateDigestCache() const;
    UInt64 getDigest(std::string_view path) const;
    String getEncodedString();
    void decodeFromString(const String & buffer_str);
    void recalculateSize() {}
    std::string_view getData() const noexcept { return {data.get(), data_size}; }

    void setResponseStat(Coordination::Stat & response_stat) const
    {
        response_stat.czxid = czxid;
        response_stat.mzxid = mzxid;
        response_stat.ctime = ctime();
        response_stat.mtime = mtime;
        response_stat.version = version;
        response_stat.cversion = cversion;
        response_stat.aversion = aversion;
        response_stat.ephemeralOwner = ephemeralOwner();
        response_stat.dataLength = static_cast<int32_t>(data_size);
        response_stat.numChildren = numChildren();
        response_stat.pzxid = pzxid;
    }

    void reset()
    {
        serialized = false;
    }
    bool empty() const
    {
        return data_size == 0 && mzxid == 0;
    }
    std::unique_ptr<char[]> data{nullptr};
    uint32_t data_size{0};
private:
    bool serialized = false;
};

/// KeeperMemNode should have as minimal size as possible to reduce memory footprint
/// of stored nodes
/// New fields should be added to the struct only if it's really necessary
struct KeeperMemNode
{
    int64_t czxid{0};
    int64_t mzxid{0};
    int64_t pzxid{0};
    uint64_t acl_id = 0; /// 0 -- no ACL by default

    int64_t mtime{0};

    std::unique_ptr<char[]> data{nullptr};
    uint32_t data_size{0};

    int32_t version{0};
    int32_t cversion{0};
    int32_t aversion{0};

    mutable uint64_t cached_digest = 0;

    KeeperMemNode() = default;

    KeeperMemNode & operator=(const KeeperMemNode & other);
    KeeperMemNode(const KeeperMemNode & other);

    KeeperMemNode & operator=(KeeperMemNode && other) noexcept;
    KeeperMemNode(KeeperMemNode && other) noexcept;

    bool empty() const;

    bool isEphemeral() const
    {
        return is_ephemeral_and_ctime.is_ephemeral;
    }

    int64_t ephemeralOwner() const
    {
        if (isEphemeral())
            return ephemeral_or_children_data.ephemeral_owner;

        return 0;
    }

    void setEphemeralOwner(int64_t ephemeral_owner)
    {
        is_ephemeral_and_ctime.is_ephemeral = ephemeral_owner != 0;
        ephemeral_or_children_data.ephemeral_owner = ephemeral_owner;
    }

    int32_t numChildren() const
    {
        if (isEphemeral())
            return 0;

        return ephemeral_or_children_data.children_info.num_children;
    }

    void setNumChildren(int32_t num_children)
    {
        ephemeral_or_children_data.children_info.num_children = num_children;
    }

    void increaseNumChildren()
    {
        chassert(!isEphemeral());
        ++ephemeral_or_children_data.children_info.num_children;
    }

    void decreaseNumChildren()
    {
        chassert(!isEphemeral());
        --ephemeral_or_children_data.children_info.num_children;
    }

    int32_t seqNum() const
    {
        if (isEphemeral())
            return 0;

        return ephemeral_or_children_data.children_info.seq_num;
    }

    void setSeqNum(int32_t seq_num)
    {
        ephemeral_or_children_data.children_info.seq_num = seq_num;
    }

    void increaseSeqNum()
    {
        chassert(!isEphemeral());
        ++ephemeral_or_children_data.children_info.seq_num;
    }

    int64_t ctime() const
    {
        return is_ephemeral_and_ctime.ctime;
    }

    void setCtime(uint64_t ctime)
    {
        is_ephemeral_and_ctime.ctime = ctime;
    }

    void copyStats(const Coordination::Stat & stat);

    void setResponseStat(Coordination::Stat & response_stat) const;

    /// Object memory size
    uint64_t sizeInBytes() const;

    void setData(const String & new_data);

    std::string_view getData() const noexcept { return {data.get(), data_size}; }

    void addChild(StringRef child_path);

    void removeChild(StringRef child_path);

    const auto & getChildren() const noexcept { return children; }
    auto & getChildren() { return children; }

    // Invalidate the calculated digest so it's recalculated again on the next
    // getDigest call
    void invalidateDigestCache() const;

    // get the calculated digest of the node
    UInt64 getDigest(std::string_view path) const;

    // copy only necessary information for preprocessing and digest calculation
    // (e.g. we don't need to copy list of children)
    void shallowCopy(const KeeperMemNode & other);
private:
    /// as ctime can't be negative because it stores the timestamp when the
    /// node was created, we can use the MSB for a bool
    struct
    {
        bool is_ephemeral : 1;
        int64_t ctime : 63;
    } is_ephemeral_and_ctime{false, 0};

    /// ephemeral nodes cannot have children so a node can set either
    /// ephemeral_owner OR seq_num + num_children
    union
    {
        int64_t ephemeral_owner;
        struct
        {
            int32_t seq_num;
            int32_t num_children;
        } children_info;
    } ephemeral_or_children_data{0};

    ChildrenSet children{};
};

class KeeperStorageBase
{
public:

    enum DigestVersion : uint8_t
    {
@@ -200,7 +396,11 @@ public:
        V4 = 4 // 0 is not a valid digest value
    };

    static constexpr auto CURRENT_DIGEST_VERSION = DigestVersion::V4;
    struct Digest
    {
        DigestVersion version{DigestVersion::NO_DIGEST};
        uint64_t value{0};
    };

    struct ResponseForSession
    {
@@ -210,16 +410,6 @@ public:
    };
    using ResponsesForSessions = std::vector<ResponseForSession>;

    struct Digest
    {
        DigestVersion version{DigestVersion::NO_DIGEST};
        uint64_t value{0};
    };

    static bool checkDigest(const Digest & first, const Digest & second);

    static String generateDigest(const String & userdata);

    struct RequestForSession
    {
        int64_t session_id;
@@ -229,6 +419,7 @@ public:
        std::optional<Digest> digest;
        int64_t log_idx{0};
    };
    using RequestsForSessions = std::vector<RequestForSession>;

    struct AuthID
    {
@@ -238,9 +429,6 @@ public:
        bool operator==(const AuthID & other) const { return scheme == other.scheme && id == other.id; }
    };

    using RequestsForSessions = std::vector<RequestForSession>;

    using Container = SnapshotableHashTable<Node>;
    using Ephemerals = std::unordered_map<int64_t, std::unordered_set<std::string>>;
    using SessionAndWatcher = std::unordered_map<int64_t, std::unordered_set<std::string>>;
    using SessionIDs = std::unordered_set<int64_t>;
@@ -250,6 +438,38 @@ public:
    using SessionAndAuth = std::unordered_map<int64_t, AuthIDs>;
    using Watches = std::unordered_map<String /* path, relative of root_path */, SessionIDs>;

    static bool checkDigest(const Digest & first, const Digest & second);

};

/// Keeper state machine almost equal to the ZooKeeper's state machine.
/// Implements all logic of operations, data changes, sessions allocation.
/// In-memory and not thread safe.
template<typename Container_>
class KeeperStorage : public KeeperStorageBase
{
public:
    using Container = Container_;
    using Node = Container::Node;

#if !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER)
    static_assert(
        sizeof(ListNode<Node>) <= 144,
        "std::list node containing ListNode<Node> is > 160 bytes (sizeof(ListNode<Node>) + 16 bytes for pointers) which will increase "
        "memory consumption");
#endif

#if USE_ROCKSDB
    static constexpr bool use_rocksdb = std::is_same_v<Container_, RocksDBContainer<KeeperRocksNode>>;
#else
    static constexpr bool use_rocksdb = false;
#endif

    static constexpr auto CURRENT_DIGEST_VERSION = DigestVersion::V4;

    static String generateDigest(const String & userdata);

    int64_t session_id_counter{1};

    SessionAndAuth session_and_auth;
@@ -393,7 +613,7 @@ public:
        std::unordered_map<std::string, std::list<const Delta *>, Hash, Equal> deltas_for_path;

        std::list<Delta> deltas;
        KeeperStorage & storage;
        KeeperStorage<Container> & storage;
    };

    UncommittedState uncommitted_state{*this};
@@ -530,10 +750,16 @@ public:
    /// Set of methods for creating snapshots

    /// Turn on snapshot mode, so data inside Container is not deleted, but replaced with new version.
    void enableSnapshotMode(size_t up_to_version) { container.enableSnapshotMode(up_to_version); }
    void enableSnapshotMode(size_t up_to_version)
    {
        container.enableSnapshotMode(up_to_version);
    }

    /// Turn off snapshot mode.
    void disableSnapshotMode() { container.disableSnapshotMode(); }
    void disableSnapshotMode()
    {
        container.disableSnapshotMode();
    }

    Container::const_iterator getSnapshotIteratorBegin() const { return container.begin(); }

@@ -572,6 +798,9 @@ private:
    void addDigest(const Node & node, std::string_view path);
};

using KeeperStoragePtr = std::unique_ptr<KeeperStorage>;
using KeeperMemoryStorage = KeeperStorage<SnapshotableHashTable<KeeperMemNode>>;
#if USE_ROCKSDB
using KeeperRocksStorage = KeeperStorage<RocksDBContainer<KeeperRocksNode>>;
#endif

}
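
A small illustration (not part of the commit) of the bit packing the node structs above rely on: because ctime stores a non-negative timestamp, its sign bit is free, so the ephemeral flag and the timestamp share one 8-byte word on the compilers ClickHouse targets.

struct EphemeralAndCtime
{
    bool is_ephemeral : 1;  /// occupies the bit a non-negative ctime never uses
    int64_t ctime : 63;     /// millisecond timestamps fit comfortably in 63 bits
};
static_assert(sizeof(EphemeralAndCtime) == 8, "flag and 63-bit timestamp pack into a single word");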

460
src/Coordination/RocksDBContainer.h
Normal file
@@ -0,0 +1,460 @@

#pragma once
#include <base/StringRef.h>
#include <Coordination/CoordinationSettings.h>
#include <Coordination/KeeperContext.h>
#include <Common/SipHash.h>
#include <Disks/DiskLocal.h>
#include <IO/WriteBufferFromString.h>
#include <IO/ReadBufferFromString.h>

#include <rocksdb/convenience.h>
#include <rocksdb/options.h>
#include <rocksdb/status.h>
#include <rocksdb/table.h>
#include <rocksdb/snapshot.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int ROCKSDB_ERROR;
    extern const int LOGICAL_ERROR;
}

/// The key-value format of rocks db will be
/// - key: Int8 (depth of the path) + String (path)
/// - value: SizeOf(keeperRocksNodeInfo) (meta of the node) + String (data)

template <class Node_>
struct RocksDBContainer
{
    using Node = Node_;

private:
    /// MockNode is only used in tests to mock `getChildren()` and `getData()`
    struct MockNode
    {
        std::vector<int> children;
        std::string data;
        MockNode(size_t children_num, std::string_view data_)
            : children(std::vector<int>(children_num)),
              data(data_)
        {
        }

        std::vector<int> getChildren() { return children; }
        std::string getData() { return data; }
    };

    UInt16 getKeyDepth(const std::string & key)
    {
        UInt16 depth = 0;
        for (size_t i = 0; i < key.size(); i++)
        {
            if (key[i] == '/' && i + 1 != key.size())
                depth++;
        }
        return depth;
    }
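
    /// For example (comment added for clarity, not in the commit): a trailing
    /// slash is not counted, so getKeyDepth("/") == 0, getKeyDepth("/a") == 1,
    /// getKeyDepth("/a/b") == 2 and getKeyDepth("/a/") == 1.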

    std::string getEncodedKey(const std::string & key, bool child_prefix = false)
    {
        WriteBufferFromOwnString key_buffer;
        UInt16 depth = getKeyDepth(key) + (child_prefix ? 1 : 0);
        writeIntBinary(depth, key_buffer);
        writeString(key, key_buffer);
        return key_buffer.str();
    }

    static std::string_view getDecodedKey(const std::string_view & key)
    {
        return std::string_view(key.begin() + 2, key.end());
    }
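
    /// Worked example (comment added for clarity, not in the commit; assumes the
    /// little-endian byte order writeIntBinary produces on the usual targets):
    /// getEncodedKey("/a/b") yields bytes 02 00 followed by "/a/b", while
    /// getEncodedKey("/a/", /*child_prefix=*/ true) yields 02 00 followed by "/a/".
    /// Every direct child of "/a" therefore shares that prefix, which is what
    /// getChildren() below seeks on; getDecodedKey() just strips the two depth bytes.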

    struct KVPair
    {
        StringRef key;
        Node value;
    };

    using ValueUpdater = std::function<void(Node & node)>;

public:

    /// This is an iterator wrapping rocksdb iterator and the kv result.
    struct const_iterator
    {
        std::shared_ptr<rocksdb::Iterator> iter;

        std::shared_ptr<const KVPair> pair;

        const_iterator() = default;

        explicit const_iterator(std::shared_ptr<KVPair> pair_) : pair(std::move(pair_)) {}

        explicit const_iterator(rocksdb::Iterator * iter_) : iter(iter_)
        {
            updatePairFromIter();
        }

        const KVPair & operator * () const
        {
            return *pair;
        }

        const KVPair * operator->() const
        {
            return pair.get();
        }

        bool operator != (const const_iterator & other) const
        {
            return !(*this == other);
        }

        bool operator == (const const_iterator & other) const
        {
            if (pair == nullptr && other == nullptr)
                return true;
            if (pair == nullptr || other == nullptr)
                return false;
            return pair->key.toView() == other->key.toView() && iter == other.iter;
        }

        bool operator == (std::nullptr_t) const
        {
            return iter == nullptr;
        }

        bool operator != (std::nullptr_t) const
        {
            return iter != nullptr;
        }

        explicit operator bool() const
        {
            return iter != nullptr;
        }

        const_iterator & operator ++()
        {
            iter->Next();
            updatePairFromIter();
            return *this;
        }

    private:
        void updatePairFromIter()
        {
            if (iter && iter->Valid())
            {
                auto new_pair = std::make_shared<KVPair>();
                new_pair->key = StringRef(getDecodedKey(iter->key().ToStringView()));
                ReadBufferFromOwnString buffer(iter->value().ToStringView());
                typename Node::Meta & meta = new_pair->value;
                readPODBinary(meta, buffer);
                readVarUInt(new_pair->value.data_size, buffer);
                if (new_pair->value.data_size)
                {
                    new_pair->value.data = std::unique_ptr<char[]>(new char[new_pair->value.data_size]);
                    buffer.readStrict(new_pair->value.data.get(), new_pair->value.data_size);
                }
                pair = new_pair;
            }
            else
            {
                pair = nullptr;
                iter = nullptr;
            }
        }
    };

    bool initialized = false;

    const const_iterator end_ptr;

    void initialize(const KeeperContextPtr & context)
    {
        DiskPtr disk = context->getTemporaryRocksDBDisk();
        if (disk == nullptr)
        {
            throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot get rocksdb disk");
        }
        auto options = context->getRocksDBOptions();
        if (options == nullptr)
        {
            throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot get rocksdb options");
        }
        rocksdb_dir = disk->getPath();
        rocksdb::DB * db;
        auto status = rocksdb::DB::Open(*options, rocksdb_dir, &db);
        if (!status.ok())
        {
            throw Exception(ErrorCodes::ROCKSDB_ERROR, "Failed to open rocksdb path at: {}: {}",
                            rocksdb_dir, status.ToString());
        }
        rocksdb_ptr = std::unique_ptr<rocksdb::DB>(db);
        write_options.disableWAL = true;
        initialized = true;
    }

    ~RocksDBContainer()
    {
        if (initialized)
        {
            rocksdb_ptr->Close();
            rocksdb_ptr = nullptr;

            std::filesystem::remove_all(rocksdb_dir);
        }
    }

    std::vector<std::pair<std::string, Node>> getChildren(const std::string & key_)
    {
        rocksdb::ReadOptions read_options;
        read_options.total_order_seek = true;

        std::string key = key_;
        if (!key.ends_with('/'))
            key += '/';
        size_t len = key.size() + 2;

        auto iter = std::unique_ptr<rocksdb::Iterator>(rocksdb_ptr->NewIterator(read_options));
        std::string encoded_string = getEncodedKey(key, true);
        rocksdb::Slice prefix(encoded_string);
        std::vector<std::pair<std::string, Node>> result;
        for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next())
        {
            Node node;
            ReadBufferFromOwnString buffer(iter->value().ToStringView());
            typename Node::Meta & meta = node;
            /// We do not read data here
            readPODBinary(meta, buffer);
            std::string real_key(iter->key().data() + len, iter->key().size() - len);
            result.emplace_back(std::move(real_key), std::move(node));
        }

        return result;
    }

    bool contains(const std::string & path)
    {
        const std::string & encoded_key = getEncodedKey(path);
        std::string buffer_str;
        rocksdb::Status status = rocksdb_ptr->Get(rocksdb::ReadOptions(), encoded_key, &buffer_str);
        if (status.IsNotFound())
            return false;
        if (!status.ok())
            throw Exception(ErrorCodes::ROCKSDB_ERROR, "Got rocksdb error during executing contains. The error message is {}.", status.ToString());
        return true;
    }

    const_iterator find(StringRef key_)
    {
        /// rocksdb::PinnableSlice slice;
        const std::string & encoded_key = getEncodedKey(key_.toString());
        std::string buffer_str;
        rocksdb::Status status = rocksdb_ptr->Get(rocksdb::ReadOptions(), encoded_key, &buffer_str);
        if (status.IsNotFound())
            return end();
        if (!status.ok())
            throw Exception(ErrorCodes::ROCKSDB_ERROR, "Got rocksdb error during executing find. The error message is {}.", status.ToString());
        ReadBufferFromOwnString buffer(buffer_str);
        auto kv = std::make_shared<KVPair>();
        kv->key = key_;
        typename Node::Meta & meta = kv->value;
        readPODBinary(meta, buffer);
        /// TODO: Sometimes we don't need to load data.
        readVarUInt(kv->value.data_size, buffer);
        if (kv->value.data_size)
        {
            kv->value.data = std::unique_ptr<char[]>(new char[kv->value.data_size]);
            buffer.readStrict(kv->value.data.get(), kv->value.data_size);
        }
        return const_iterator(kv);
    }

    MockNode getValue(StringRef key)
    {
        auto it = find(key);
        chassert(it != end());
        return MockNode(it->value.numChildren(), it->value.getData());
    }

    const_iterator updateValue(StringRef key_, ValueUpdater updater)
    {
        /// rocksdb::PinnableSlice slice;
        const std::string & key = key_.toString();
        const std::string & encoded_key = getEncodedKey(key);
        std::string buffer_str;
        rocksdb::Status status = rocksdb_ptr->Get(rocksdb::ReadOptions(), encoded_key, &buffer_str);
        if (!status.ok())
            throw Exception(ErrorCodes::ROCKSDB_ERROR, "Got rocksdb error during find. The error message is {}.", status.ToString());
        auto kv = std::make_shared<KVPair>();
        kv->key = key_;
        kv->value.decodeFromString(buffer_str);
        /// storage->removeDigest(node, key);
        updater(kv->value);
        insertOrReplace(key, kv->value);
        return const_iterator(kv);
    }

    bool insert(const std::string & key, Node & value)
    {
        std::string value_str;
        const std::string & encoded_key = getEncodedKey(key);
        rocksdb::Status status = rocksdb_ptr->Get(rocksdb::ReadOptions(), encoded_key, &value_str);
        if (status.ok())
        {
            return false;
        }
        else if (status.IsNotFound())
        {
            status = rocksdb_ptr->Put(write_options, encoded_key, value.getEncodedString());
            if (status.ok())
            {
                counter++;
                return true;
            }
        }

        throw Exception(ErrorCodes::ROCKSDB_ERROR, "Got rocksdb error during insert. The error message is {}.", status.ToString());
    }

    void insertOrReplace(const std::string & key, Node & value)
    {
        const std::string & encoded_key = getEncodedKey(key);
        /// storage->addDigest(value, key);
        std::string value_str;
        rocksdb::Status status = rocksdb_ptr->Get(rocksdb::ReadOptions(), encoded_key, &value_str);
        bool increase_counter = false;
        if (status.IsNotFound())
            increase_counter = true;
        else if (!status.ok())
            throw Exception(ErrorCodes::ROCKSDB_ERROR, "Got rocksdb error during get. The error message is {}.", status.ToString());

        status = rocksdb_ptr->Put(write_options, encoded_key, value.getEncodedString());
        if (status.ok())
            counter += increase_counter;
        else
            throw Exception(ErrorCodes::ROCKSDB_ERROR, "Got rocksdb error during insert. The error message is {}.", status.ToString());
    }

    using KeyPtr = std::unique_ptr<char[]>;

    /// To be compatible with SnapshotableHashTable, will remove later;
    KeyPtr allocateKey(size_t size)
    {
        return KeyPtr{new char[size]};
    }

    void insertOrReplace(KeyPtr key_data, size_t key_size, Node value)
    {
        std::string key(key_data.get(), key_size);
        insertOrReplace(key, value);
    }

    bool erase(const std::string & key)
    {
        /// storage->removeDigest(value, key);
        const std::string & encoded_key = getEncodedKey(key);

        auto status = rocksdb_ptr->Delete(write_options, encoded_key);
        if (status.IsNotFound())
            return false;
        if (status.ok())
        {
            counter--;
            return true;
        }
        throw Exception(ErrorCodes::ROCKSDB_ERROR, "Got rocksdb error during erase. The error message is {}.", status.ToString());
    }

    void recalculateDataSize() {}
    void reverse(size_t size_) {(void)size_;}

    uint64_t getApproximateDataSize() const
    {
        /// use statistics from rocksdb
        return counter * sizeof(Node);
    }
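
    /// Note (comment added for clarity, not in the commit): this is a rough
    /// estimate -- it counts only the fixed-size node metadata and ignores the
    /// variable-length data payloads actually stored in RocksDB.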

    void enableSnapshotMode(size_t version)
    {
        chassert(!snapshot_mode);
        snapshot_mode = true;
        snapshot_up_to_version = version;
        snapshot_size = counter;
        ++current_version;

        snapshot = rocksdb_ptr->GetSnapshot();
    }

    void disableSnapshotMode()
    {
        chassert(snapshot_mode);
        snapshot_mode = false;
        rocksdb_ptr->ReleaseSnapshot(snapshot);
    }

    void clearOutdatedNodes() {}

    std::pair<size_t, size_t> snapshotSizeWithVersion() const
    {
        if (!snapshot_mode)
            return std::make_pair(counter, current_version);
        else
            return std::make_pair(snapshot_size, current_version);
    }

    const_iterator begin() const
    {
        rocksdb::ReadOptions read_options;
        read_options.total_order_seek = true;
        if (snapshot_mode)
            read_options.snapshot = snapshot;
        auto * iter = rocksdb_ptr->NewIterator(read_options);
        iter->SeekToFirst();
        return const_iterator(iter);
    }

    const_iterator end() const
    {
        return end_ptr;
    }

    size_t size() const
    {
        return counter;
    }

    uint64_t getArenaDataSize() const
    {
        return 0;
    }

    uint64_t keyArenaSize() const
    {
        return 0;
    }

private:
    String rocksdb_dir;

    std::unique_ptr<rocksdb::DB> rocksdb_ptr;
    rocksdb::WriteOptions write_options;

    const rocksdb::Snapshot * snapshot;

    bool snapshot_mode{false};
    size_t current_version{0};
    size_t snapshot_up_to_version{0};
    size_t snapshot_size{0};
    size_t counter{0};

};

}
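
A hypothetical usage sketch (not part of the commit): the container mirrors the interface of SnapshotableHashTable, so KeeperStorage-style code drives either backend the same way. Only the names taken from the diff above are real; the surrounding function is illustrative.

void exampleUsage(const KeeperContextPtr & keeper_context)
{
    RocksDBContainer<KeeperRocksNode> container;
    container.initialize(keeper_context);

    KeeperRocksNode node;
    node.setData("payload");
    container.insert("/a", node);      /// stored under "<depth><path>" internally

    auto it = container.find(StringRef("/a"));
    chassert(it != container.end());

    /// direct children of "/a" come back via the depth-prefixed range scan
    for (auto & [name, child] : container.getChildren("/a"))
        (void) name, (void) child;
}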

@@ -212,9 +212,9 @@ private:
        updateDataSize(INSERT_OR_REPLACE, key.size, new_value_size, old_value_size, !snapshot_mode);
    }

public:

    using Node = V;
    using iterator = typename List::iterator;
    using const_iterator = typename List::const_iterator;
    using ValueUpdater = std::function<void(V & value)>;
@@ -364,6 +364,7 @@ public:
    {
        auto map_it = map.find(key);
        if (map_it != map.end())
            /// return std::make_shared<KVPair>(KVPair{map_it->getMapped()->key, map_it->getMapped()->value});
            return map_it->getMapped();
        return list.end();
    }

@@ -43,7 +43,8 @@ void deserializeSnapshotMagic(ReadBuffer & in)
        throw Exception(ErrorCodes::CORRUPTED_DATA, "Incorrect magic header in file, expected {}, got {}", SNP_HEADER, magic_header);
}

int64_t deserializeSessionAndTimeout(KeeperStorage & storage, ReadBuffer & in)
template<typename Storage>
int64_t deserializeSessionAndTimeout(Storage & storage, ReadBuffer & in)
{
    int32_t count;
    Coordination::read(count, in);
@@ -62,7 +63,8 @@ int64_t deserializeSessionAndTimeout(KeeperStorage & storage, ReadBuffer & in)
    return max_session_id;
}

void deserializeACLMap(KeeperStorage & storage, ReadBuffer & in)
template<typename Storage>
void deserializeACLMap(Storage & storage, ReadBuffer & in)
{
    int32_t count;
    Coordination::read(count, in);
@@ -90,7 +92,8 @@ void deserializeACLMap(KeeperStorage & storage, ReadBuffer & in)
    }
}

int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, LoggerPtr log)
template<typename Storage>
int64_t deserializeStorageData(Storage & storage, ReadBuffer & in, LoggerPtr log)
{
    int64_t max_zxid = 0;
    std::string path;
@@ -98,7 +101,7 @@ int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, LoggerP
    size_t count = 0;
    while (path != "/")
    {
        KeeperStorage::Node node{};
        typename Storage::Node node{};
        String data;
        Coordination::read(data, in);
        node.setData(data);
@@ -146,14 +149,15 @@ int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, LoggerP
        if (itr.key != "/")
        {
            auto parent_path = parentNodePath(itr.key);
            storage.container.updateValue(parent_path, [my_path = itr.key] (KeeperStorage::Node & value) { value.addChild(getBaseNodeName(my_path)); value.increaseNumChildren(); });
            storage.container.updateValue(parent_path, [my_path = itr.key] (typename Storage::Node & value) { value.addChild(getBaseNodeName(my_path)); value.increaseNumChildren(); });
        }
    }

    return max_zxid;
}

void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, LoggerPtr log)
template<typename Storage>
void deserializeKeeperStorageFromSnapshot(Storage & storage, const std::string & snapshot_path, LoggerPtr log)
{
    LOG_INFO(log, "Deserializing storage snapshot {}", snapshot_path);
    int64_t zxid = getZxidFromName(snapshot_path);
@@ -192,9 +196,11 @@ void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::st
    LOG_INFO(log, "Finished, snapshot ZXID {}", storage.zxid);
}

void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, LoggerPtr log)
namespace fs = std::filesystem;

template<typename Storage>
void deserializeKeeperStorageFromSnapshotsDir(Storage & storage, const std::string & path, LoggerPtr log)
{
    namespace fs = std::filesystem;
    std::map<int64_t, std::string> existing_snapshots;
    for (const auto & p : fs::directory_iterator(path))
    {
@@ -480,7 +486,8 @@ bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request)

}

bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, LoggerPtr /*log*/)
template<typename Storage>
bool deserializeTxn(Storage & storage, ReadBuffer & in, LoggerPtr /*log*/)
{
    int64_t checksum;
    Coordination::read(checksum, in);
@@ -535,7 +542,8 @@ bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, LoggerPtr /*log*/)
    return true;
}

void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, LoggerPtr log)
template<typename Storage>
void deserializeLogAndApplyToStorage(Storage & storage, const std::string & log_path, LoggerPtr log)
{
    ReadBufferFromFile reader(log_path);

@@ -559,9 +567,9 @@ void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string
    LOG_INFO(log, "Finished {} deserialization, totally read {} records", log_path, counter);
}

void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, LoggerPtr log)
template<typename Storage>
void deserializeLogsAndApplyToStorage(Storage & storage, const std::string & path, LoggerPtr log)
{
    namespace fs = std::filesystem;
    std::map<int64_t, std::string> existing_logs;
    for (const auto & p : fs::directory_iterator(path))
    {
@@ -595,4 +603,9 @@ void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string
    }
}

template void deserializeKeeperStorageFromSnapshot<KeeperMemoryStorage>(KeeperMemoryStorage & storage, const std::string & snapshot_path, LoggerPtr log);
template void deserializeKeeperStorageFromSnapshotsDir<KeeperMemoryStorage>(KeeperMemoryStorage & storage, const std::string & path, LoggerPtr log);
template void deserializeLogAndApplyToStorage<KeeperMemoryStorage>(KeeperMemoryStorage & storage, const std::string & log_path, LoggerPtr log);
template void deserializeLogsAndApplyToStorage<KeeperMemoryStorage>(KeeperMemoryStorage & storage, const std::string & path, LoggerPtr log);

}
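
A sketch (not part of the commit) of a call site under these signatures; only the explicitly instantiated KeeperMemoryStorage specialization exists in the build, so a restore helper would look like:

template <typename Storage>
void restoreFromZooKeeper(Storage & storage, const std::string & snapshots_dir, const std::string & logs_dir, LoggerPtr log)
{
    /// replay the newest applicable snapshot, then the transaction logs on top of it
    deserializeKeeperStorageFromSnapshotsDir(storage, snapshots_dir, log);
    deserializeLogsAndApplyToStorage(storage, logs_dir, log);
}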

@@ -5,12 +5,16 @@
namespace DB
{

void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, LoggerPtr log);
template<typename Storage>
void deserializeKeeperStorageFromSnapshot(Storage & storage, const std::string & snapshot_path, LoggerPtr log);

void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, LoggerPtr log);
template<typename Storage>
void deserializeKeeperStorageFromSnapshotsDir(Storage & storage, const std::string & path, LoggerPtr log);

void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, LoggerPtr log);
template<typename Storage>
void deserializeLogAndApplyToStorage(Storage & storage, const std::string & log_path, LoggerPtr log);

void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, LoggerPtr log);
template<typename Storage>
void deserializeLogsAndApplyToStorage(Storage & storage, const std::string & path, LoggerPtr log);

}

File diff suppressed because it is too large
@@ -47,54 +47,85 @@ bool allArgumentsAreConstants(const ColumnsWithTypeAndName & args)
    return true;
}

/// Replaces single low cardinality column in a function call by its dictionary
/// This can only happen after the arguments have been adapted in IFunctionOverloadResolver::getReturnType
/// as it's only possible if there is one low cardinality column and, optionally, const columns
ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
    ColumnsWithTypeAndName & args, bool can_be_executed_on_default_arguments, size_t input_rows_count)
{
    size_t num_rows = input_rows_count;
    /// We return the LC indexes so the LC can be reconstructed with the function result
    ColumnPtr indexes;

    /// Find first LowCardinality column and replace it to nested dictionary.
    for (auto & column : args)
    size_t number_low_cardinality_columns = 0;
    size_t last_low_cardinality = 0;
    size_t number_const_columns = 0;
    size_t number_full_columns = 0;

    for (size_t i = 0; i < args.size(); i++)
    {
        if (const auto * low_cardinality_column = checkAndGetColumn<ColumnLowCardinality>(column.column.get()))
        auto const & arg = args[i];
        if (checkAndGetColumn<ColumnLowCardinality>(arg.column.get()))
        {
            /// Single LowCardinality column is supported now.
            if (indexes)
                throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single dictionary argument for function.");

            const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(column.type.get());

            if (!low_cardinality_type)
                throw Exception(ErrorCodes::LOGICAL_ERROR,
                    "Incompatible type for LowCardinality column: {}",
                    column.type->getName());

            if (can_be_executed_on_default_arguments)
            {
                /// Normal case, when function can be executed on values' default.
                column.column = low_cardinality_column->getDictionary().getNestedColumn();
                indexes = low_cardinality_column->getIndexesPtr();
            }
            else
            {
                /// Special case when default value can't be used. Example: 1 % LowCardinality(Int).
                /// LowCardinality always contains default, so 1 % 0 will throw exception in normal case.
                auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size());
                column.column = dict_encoded.dictionary;
                indexes = dict_encoded.indexes;
            }

            num_rows = column.column->size();
            column.type = low_cardinality_type->getDictionaryType();
            number_low_cardinality_columns++;
            last_low_cardinality = i;
        }
        else if (checkAndGetColumn<ColumnConst>(arg.column.get()))
            number_const_columns++;
        else
            number_full_columns++;
    }

    /// Change size of constants.
    if (!number_low_cardinality_columns && !number_const_columns)
        return nullptr;

    if (number_full_columns > 0 || number_low_cardinality_columns > 1)
    {
        /// This should not be possible but currently there are multiple tests in CI failing because of it
        /// TODO: Fix those cases, then enable this exception
#if 0
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected low cardinality types found. Low cardinality: {}. Full {}. Const {}",
            number_low_cardinality_columns, number_full_columns, number_const_columns);
#else
        return nullptr;
#endif
    }
    else if (number_low_cardinality_columns == 1)
    {
        auto & lc_arg = args[last_low_cardinality];

        const auto * low_cardinality_type = checkAndGetDataType<DataTypeLowCardinality>(lc_arg.type.get());
        if (!low_cardinality_type)
            throw Exception(ErrorCodes::LOGICAL_ERROR, "Incompatible type for LowCardinality column: {}", lc_arg.type->getName());

        const auto * low_cardinality_column = checkAndGetColumn<ColumnLowCardinality>(lc_arg.column.get());
        chassert(low_cardinality_column);

        if (can_be_executed_on_default_arguments)
        {
            /// Normal case, when function can be executed on values' default.
            lc_arg.column = low_cardinality_column->getDictionary().getNestedColumn();
            indexes = low_cardinality_column->getIndexesPtr();
        }
        else
        {
            /// Special case when default value can't be used. Example: 1 % LowCardinality(Int).
            /// LowCardinality always contains default, so 1 % 0 will throw exception in normal case.
            auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size());
            lc_arg.column = dict_encoded.dictionary;
            indexes = dict_encoded.indexes;
        }

        /// The new column will have a different number of rows, normally less but occasionally it might be more (NULL)
        input_rows_count = lc_arg.column->size();
        lc_arg.type = low_cardinality_type->getDictionaryType();
    }

    /// Change size of constants
    for (auto & column : args)
    {
        if (const auto * column_const = checkAndGetColumn<ColumnConst>(column.column.get()))
        {
            column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), num_rows);
            column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), input_rows_count);
            column.type = recursiveRemoveLowCardinality(column.type);
        }
    }
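
/// Illustration (added comment, not part of the commit): for
/// args = [ LowCardinality(String) with dictionary ["", "a", "b"] and indexes [1, 2, 1],
///          Const(UInt8) of the original row count ],
/// the function roughly rewrites args to the dictionary's nested String column
/// (3 rows) plus the Const resized to 3 rows, and returns the indexes column
/// [1, 2, 1] so the caller can wrap the computed result back into LowCardinality.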

@@ -270,6 +301,8 @@ ColumnPtr IExecutableFunction::executeWithoutSparseColumns(const ColumnsWithType
        bool can_be_executed_on_default_arguments = canBeExecutedOnDefaultArguments();

        const auto & dictionary_type = res_low_cardinality_type->getDictionaryType();
        /// The arguments should have been adapted in IFunctionOverloadResolver::getReturnType
        /// So there is only one low cardinality column (and optionally some const columns) and no full column
        ColumnPtr indexes = replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes(
            columns_without_low_cardinality, can_be_executed_on_default_arguments, input_rows_count);

@@ -42,6 +42,10 @@ public:

    bool useDefaultImplementationForNulls() const override { return false; }

    bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }

    bool useDefaultImplementationForSparseColumns() const override { return false; }

    bool isSuitableForConstantFolding() const override { return false; }

    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

@@ -25,8 +25,6 @@ namespace ProfileEvents
    extern const Event ReadBufferFromS3InitMicroseconds;
    extern const Event ReadBufferFromS3Bytes;
    extern const Event ReadBufferFromS3RequestsErrors;
    extern const Event ReadBufferFromS3ResetSessions;
    extern const Event ReadBufferFromS3PreservedSessions;
    extern const Event ReadBufferSeekCancelConnection;
    extern const Event S3GetObject;
    extern const Event DiskS3GetObject;

@@ -310,7 +310,7 @@ IColumn::Selector ConcurrentHashJoin::selectDispatchBlock(const Strings & key_co
    {
        const auto & key_col = from_block.getByName(key_name).column->convertToFullColumnIfConst();
        const auto & key_col_no_lc = recursiveRemoveLowCardinality(recursiveRemoveSparse(key_col));
        key_col_no_lc->updateWeakHash32(hash);
        hash.update(key_col_no_lc->getWeakHash32());
    }
    return hashToSelector(hash, num_shards);
}
|
||||
|
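The hashing change above (and the identical ones further down) replaces the in-place updateWeakHash32(hash) mutation with computing a column's hash and folding it into the accumulator via hash.update(...). A rough Python model of combining per-column row hashes into a shard selector (the hash and combiner are stand-ins, not the real WeakHash32):

    import zlib

    def column_hash(values):
        # Per-row 32-bit hash of one column (stand-in for getWeakHash32()).
        return [zlib.crc32(str(v).encode()) & 0xFFFFFFFF for v in values]

    def update(acc, col_hashes):
        # Fold one column's hashes into the accumulator (stand-in for hash.update()).
        return [((a * 1000003) ^ h) & 0xFFFFFFFF for a, h in zip(acc, col_hashes)]

    def hash_to_selector(acc, num_shards):
        return [h % num_shards for h in acc]

    acc = [0] * 4
    for col in (["x", "y", "x", "z"], [1, 2, 1, 3]):
        acc = update(acc, column_hash(col))
    # Rows 0 and 2 carry equal values in every key column, so they land in the same shard.
    print(hash_to_selector(acc, num_shards=2))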
@ -121,9 +121,18 @@ String InterpreterShowTablesQuery::getRewrittenQuery()
    if (query.merges)
    {
        WriteBufferFromOwnString rewritten_query;
        rewritten_query << "SELECT table, database, round((elapsed * (1 / merges.progress)) - merges.elapsed, 2) AS estimate_complete, round(elapsed,2) elapsed, "
                           "round(progress*100, 2) AS progress, is_mutation, formatReadableSize(total_size_bytes_compressed) AS size_compressed, "
                           "formatReadableSize(memory_usage) AS memory_usage FROM system.merges";
        rewritten_query << R"(
            SELECT
                table,
                database,
                merges.progress > 0 ? round(merges.elapsed * (1 - merges.progress) / merges.progress, 2) : NULL AS estimate_complete,
                round(elapsed, 2) AS elapsed,
                round(progress * 100, 2) AS progress,
                is_mutation,
                formatReadableSize(total_size_bytes_compressed) AS size_compressed,
                formatReadableSize(memory_usage) AS memory_usage
            FROM system.merges
        )";

        if (!query.like.empty())
        {
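The rewritten estimate_complete both guards against division by zero and fixes the arithmetic: time remaining is elapsed * (1 - progress) / progress. A quick check of the formula in Python:

    def estimate_complete(elapsed, progress):
        # Mirrors the rewritten SQL: NULL (None) when there is no progress yet.
        return round(elapsed * (1 - progress) / progress, 2) if progress > 0 else None

    print(estimate_complete(10.0, 0.25))  # 30.0 -- 25% done in 10s, so ~30s left
    print(estimate_complete(10.0, 0.0))   # None instead of a division by zero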
@ -554,7 +554,7 @@ static Blocks scatterBlockByHashImpl(const Strings & key_columns_names, const Bl
    for (const auto & key_name : key_columns_names)
    {
        ColumnPtr key_col = materializeColumn(block, key_name);
        key_col->updateWeakHash32(hash);
        hash.update(key_col->getWeakHash32());
    }
    auto selector = hashToSelector(hash, sharder);
@ -7,7 +7,6 @@
#include <Common/FieldVisitorToString.h>
#include <Common/KnownObjectNames.h>
#include <Common/SipHash.h>
#include <Common/typeid_cast.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>
@ -19,9 +18,6 @@
#include <Parsers/queryToString.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/FunctionSecretArgumentsFinderAST.h>
#include <Core/QualifiedTableName.h>

#include <boost/algorithm/string.hpp>


using namespace std::literals;
@ -632,6 +628,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
                settings.ostr << ", ";
            if (arguments->children[i]->as<ASTSetQuery>())
                settings.ostr << "SETTINGS ";
            nested_dont_need_parens.list_element_index = i;
            arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens);
        }
        settings.ostr << (settings.hilite ? hilite_operator : "") << ']' << (settings.hilite ? hilite_none : "");
@ -642,12 +639,14 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
    {
        settings.ostr << (settings.hilite ? hilite_operator : "") << ((frame.need_parens && !alias.empty()) ? "tuple" : "") << '('
                      << (settings.hilite ? hilite_none : "");

        for (size_t i = 0; i < arguments->children.size(); ++i)
        {
            if (i != 0)
                settings.ostr << ", ";
            if (arguments->children[i]->as<ASTSetQuery>())
                settings.ostr << "SETTINGS ";
            nested_dont_need_parens.list_element_index = i;
            arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens);
        }
        settings.ostr << (settings.hilite ? hilite_operator : "") << ')' << (settings.hilite ? hilite_none : "");
@ -663,6 +662,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
                settings.ostr << ", ";
            if (arguments->children[i]->as<ASTSetQuery>())
                settings.ostr << "SETTINGS ";
            nested_dont_need_parens.list_element_index = i;
            arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens);
        }
        settings.ostr << (settings.hilite ? hilite_operator : "") << ')' << (settings.hilite ? hilite_none : "");
@ -745,7 +745,12 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan,
        {
            auto & interpolate_node_typed = interpolate_node->as<InterpolateNode &>();

            PlannerActionsVisitor planner_actions_visitor(planner_context);
            PlannerActionsVisitor planner_actions_visitor(
                planner_context,
                /* use_column_identifier_as_action_node_name_, (default value)*/ true,
                /// Prefer the INPUT to CONSTANT nodes (actions must be non constant)
                /* always_use_const_column_for_constant_nodes */ false);

            auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(*interpolate_actions_dag,
                interpolate_node_typed.getExpression());
            if (expression_to_interpolate_expression_nodes.size() != 1)
@ -487,16 +487,33 @@ public:
        return node;
    }

    const ActionsDAG::Node * addConstantIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column)
    [[nodiscard]] String addConstantIfNecessary(
        const std::string & node_name, const ColumnWithTypeAndName & column, bool always_use_const_column_for_constant_nodes)
    {
        chassert(column.column != nullptr);
        auto it = node_name_to_node.find(node_name);
        if (it != node_name_to_node.end() && (!always_use_const_column_for_constant_nodes || it->second->column))
            return {node_name};

        if (it != node_name_to_node.end())
            return it->second;
        {
            /// There is a node with this name, but it doesn't have a column
            /// This likely happens because we executed the query until WithMergeableState with a const node in the
            /// WHERE clause and, as the results of headers are materialized, the column was removed
            /// Let's add a new column and keep this
            String dupped_name{node_name + "_dupped"};
            if (node_name_to_node.find(dupped_name) != node_name_to_node.end())
                return dupped_name;

            const auto * node = &actions_dag.addColumn(column);
            node_name_to_node[dupped_name] = node;
            return dupped_name;
        }

        const auto * node = &actions_dag.addColumn(column);
        node_name_to_node[node->result_name] = node;

        return node;
        return {node_name};
    }

    template <typename FunctionOrOverloadResolver>
@ -525,7 +542,7 @@ public:
    }

private:
    std::unordered_map<std::string_view, const ActionsDAG::Node *> node_name_to_node;
    std::unordered_map<String, const ActionsDAG::Node *> node_name_to_node;
    ActionsDAG & actions_dag;
    QueryTreeNodePtr scope_node;
};
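The reworked addConstantIfNecessary returns a node name instead of a node pointer: when the name is already taken by a column-less INPUT (e.g. a constant that was materialized away in an intermediate header), the constant is registered under a "_dupped" alias rather than overriding the input. The naming logic, condensed into a Python sketch (a dict stands in for the ActionsDAG):

    def add_constant_if_necessary(name_to_node, name, column, always_use_const_column):
        node = name_to_node.get(name)
        if node is not None and (not always_use_const_column or node["column"] is not None):
            return name                      # existing node is acceptable as-is
        if node is not None:
            dupped = name + "_dupped"        # name taken by a column-less INPUT
            name_to_node.setdefault(dupped, {"column": column})
            return dupped
        name_to_node[name] = {"column": column}
        return name

    nodes = {"c": {"column": None}}  # an INPUT node without a column
    print(add_constant_if_necessary(nodes, "c", 42, always_use_const_column=True))   # c_dupped
    print(add_constant_if_necessary(nodes, "c", 42, always_use_const_column=False))  # c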
@ -533,9 +550,11 @@ private:
class PlannerActionsVisitorImpl
{
public:
    PlannerActionsVisitorImpl(ActionsDAG & actions_dag,
    PlannerActionsVisitorImpl(
        ActionsDAG & actions_dag,
        const PlannerContextPtr & planner_context_,
        bool use_column_identifier_as_action_node_name_);
        bool use_column_identifier_as_action_node_name_,
        bool always_use_const_column_for_constant_nodes_);

    ActionsDAG::NodeRawConstPtrs visit(QueryTreeNodePtr expression_node);

@ -595,14 +614,18 @@ private:
    const PlannerContextPtr planner_context;
    ActionNodeNameHelper action_node_name_helper;
    bool use_column_identifier_as_action_node_name;
    bool always_use_const_column_for_constant_nodes;
};

PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(ActionsDAG & actions_dag,
PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(
    ActionsDAG & actions_dag,
    const PlannerContextPtr & planner_context_,
    bool use_column_identifier_as_action_node_name_)
    bool use_column_identifier_as_action_node_name_,
    bool always_use_const_column_for_constant_nodes_)
    : planner_context(planner_context_)
    , action_node_name_helper(node_to_node_name, *planner_context, use_column_identifier_as_action_node_name_)
    , use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_)
    , always_use_const_column_for_constant_nodes(always_use_const_column_for_constant_nodes_)
{
    actions_stack.emplace_back(actions_dag, nullptr);
}
@ -725,17 +748,16 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi
    column.type = constant_type;
    column.column = column.type->createColumnConst(1, constant_literal);

    actions_stack[0].addConstantIfNecessary(constant_node_name, column);
    String final_name = actions_stack[0].addConstantIfNecessary(constant_node_name, column, always_use_const_column_for_constant_nodes);

    size_t actions_stack_size = actions_stack.size();
    for (size_t i = 1; i < actions_stack_size; ++i)
    {
        auto & actions_stack_node = actions_stack[i];
        actions_stack_node.addInputConstantColumnIfNecessary(constant_node_name, column);
        actions_stack_node.addInputConstantColumnIfNecessary(final_name, column);
    }

    return {constant_node_name, Levels(0)};
    return {final_name, Levels(0)};
}

PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitLambda(const QueryTreeNodePtr & node)
@ -864,16 +886,16 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::ma
    else
        column.column = std::move(column_set);

    actions_stack[0].addConstantIfNecessary(column.name, column);
    String final_name = actions_stack[0].addConstantIfNecessary(column.name, column, always_use_const_column_for_constant_nodes);

    size_t actions_stack_size = actions_stack.size();
    for (size_t i = 1; i < actions_stack_size; ++i)
    {
        auto & actions_stack_node = actions_stack[i];
        actions_stack_node.addInputConstantColumnIfNecessary(column.name, column);
        actions_stack_node.addInputConstantColumnIfNecessary(final_name, column);
    }

    return {column.name, Levels(0)};
    return {final_name, Levels(0)};
}

PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitIndexHintFunction(const QueryTreeNodePtr & node)
@ -1010,14 +1032,19 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi

}

PlannerActionsVisitor::PlannerActionsVisitor(const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_)
PlannerActionsVisitor::PlannerActionsVisitor(
    const PlannerContextPtr & planner_context_,
    bool use_column_identifier_as_action_node_name_,
    bool always_use_const_column_for_constant_nodes_)
    : planner_context(planner_context_)
    , use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_)
    , always_use_const_column_for_constant_nodes(always_use_const_column_for_constant_nodes_)
{}

ActionsDAG::NodeRawConstPtrs PlannerActionsVisitor::visit(ActionsDAG & actions_dag, QueryTreeNodePtr expression_node)
{
    PlannerActionsVisitorImpl actions_visitor_impl(actions_dag, planner_context, use_column_identifier_as_action_node_name);
    PlannerActionsVisitorImpl actions_visitor_impl(
        actions_dag, planner_context, use_column_identifier_as_action_node_name, always_use_const_column_for_constant_nodes);
    return actions_visitor_impl.visit(expression_node);
}
@ -27,11 +27,17 @@ using PlannerContextPtr = std::shared_ptr<PlannerContext>;
  * During actions build, there is special handling for following functions:
  * 1. Aggregate functions are added in actions dag as INPUT nodes. Aggregate functions arguments are not added.
  * 2. For function `in` and its variants, already collected sets from planner context are used.
  * 3. When building actions that use CONSTANT nodes, by default we ignore pre-existing INPUTs if those don't have
  *    a column (a const column always has a column). This is for compatibility with previous headers. We disable this
  *    behaviour when we explicitly want to override CONSTANT nodes with the input (resolving InterpolateNode for example)
  */
class PlannerActionsVisitor
{
public:
    explicit PlannerActionsVisitor(const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_ = true);
    explicit PlannerActionsVisitor(
        const PlannerContextPtr & planner_context_,
        bool use_column_identifier_as_action_node_name_ = true,
        bool always_use_const_column_for_constant_nodes_ = true);

    /** Add actions necessary to calculate expression node into expression dag.
      * Necessary actions are not added in actions dag output.
@ -42,6 +48,7 @@ public:
private:
    const PlannerContextPtr planner_context;
    bool use_column_identifier_as_action_node_name = true;
    bool always_use_const_column_for_constant_nodes = true;
};
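Why INTERPOLATE needs the INPUT rather than a folded constant (point 3 of the comment above): the interpolate expression is re-evaluated for every gap-filled row, feeding each newly produced value back in. A small Python sketch of WITH FILL + INTERPOLATE semantics (simplified, hypothetical helper):

    def fill_with_interpolate(rows, step, interpolate):
        # rows: sorted (key, value) pairs; insert missing keys, interpolating the value.
        out = [rows[0]]
        for key, value in rows[1:]:
            k, v = out[-1]
            while k + step < key:
                k, v = k + step, interpolate(v)  # uses the previous row's value
                out.append((k, v))
            out.append((key, value))
        return out

    # INTERPOLATE (v AS v + 1): each filled row derives from the prior one,
    # so "v" must stay a real input and cannot be folded into a constant.
    print(fill_with_interpolate([(0, 10), (4, 99)], step=1, interpolate=lambda v: v + 1))
    # [(0, 10), (1, 11), (2, 12), (3, 13), (4, 99)]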
/** Calculate query tree expression node action dag name and add them into node to name map.
@ -109,7 +109,7 @@ void ScatterByPartitionTransform::generateOutputChunks()
    hash.reset(num_rows);

    for (const auto & column_number : key_columns)
        columns[column_number]->updateWeakHash32(hash);
        hash.update(columns[column_number]->getWeakHash32());

    const auto & hash_data = hash.getData();
    IColumn::Selector selector(num_rows);
@ -155,6 +155,10 @@ void printExceptionWithRespectToAbort(LoggerPtr log, const String & query_id)
    {
        std::rethrow_exception(ex);
    }
    catch (const TestException &) // NOLINT
    {
        /// Exception from a unit test, ignore it.
    }
    catch (const Exception & e)
    {
        NOEXCEPT_SCOPE({
@ -233,7 +233,7 @@ static bool isConditionGood(const RPNBuilderTreeNode & condition, const NameSet
    else if (type == Field::Types::Float64)
    {
        const auto value = output_value.get<Float64>();
        return value < threshold || threshold < value;
        return value < -threshold || threshold < value;
    }

    return false;
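The one-character fix above changes the predicate's meaning: value < threshold || threshold < value is just value != threshold, while the intended test is |value| > threshold. Checked numerically in Python (threshold value illustrative):

    threshold = 2.0

    def old_check(value):  # effectively: value != threshold
        return value < threshold or threshold < value

    def new_check(value):  # |value| > threshold, as intended
        return value < -threshold or threshold < value

    print(old_check(-1.0), new_check(-1.0))  # True False -- old misclassifies small negatives
    print(old_check(-3.0), new_check(-3.0))  # True True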
@ -34,7 +34,7 @@ public:

        auto choice = distribution(generator);
        if (choice == 0)
            throw std::runtime_error("Unlucky...");
            throw TestException();

        return false;
    }
@ -48,7 +48,7 @@ public:
    {
        auto choice = distribution(generator);
        if (choice == 0)
            throw std::runtime_error("Unlucky...");
            throw TestException();
    }

    Priority getPriority() const override { return {}; }
@ -69,9 +69,7 @@ ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tre
    bool allow_where = true;
    if (const auto * join_node = join_tree->as<JoinNode>())
    {
        if (join_node->getStrictness() != JoinStrictness::All)
            allow_where = false;
        else if (join_node->getKind() == JoinKind::Left)
        if (join_node->getKind() == JoinKind::Left)
            allow_where = join_node->getLeftTableExpression()->isEqual(*table_expression);
        else if (join_node->getKind() == JoinKind::Right)
            allow_where = join_node->getRightTableExpression()->isEqual(*table_expression);
@ -3,8 +3,13 @@ import time
from pathlib import Path
from typing import Optional
from shutil import copy2
from create_release import PackageDownloader, ReleaseInfo, ShellRunner
from ci_utils import WithIter
from create_release import (
    PackageDownloader,
    ReleaseInfo,
    ReleaseContextManager,
    ReleaseProgress,
)
from ci_utils import WithIter, Shell


class MountPointApp(metaclass=WithIter):
@ -76,19 +81,20 @@ class R2MountPoint:
            )

        _TEST_MOUNT_CMD = f"mount | grep -q {self.MOUNT_POINT}"
        ShellRunner.run(_CLEAN_LOG_FILE_CMD)
        ShellRunner.run(_UNMOUNT_CMD)
        ShellRunner.run(_MKDIR_CMD)
        ShellRunner.run(_MKDIR_FOR_CACHE)
        ShellRunner.run(self.mount_cmd, async_=self.async_mount)
        Shell.run(_CLEAN_LOG_FILE_CMD)
        Shell.run(_UNMOUNT_CMD)
        Shell.run(_MKDIR_CMD)
        Shell.run(_MKDIR_FOR_CACHE)
        # didn't manage to use simple run() and not block or fail
        Shell.run_as_daemon(self.mount_cmd)
        if self.async_mount:
            time.sleep(3)
        ShellRunner.run(_TEST_MOUNT_CMD)
        Shell.run(_TEST_MOUNT_CMD, check=True)

    @classmethod
    def teardown(cls):
        print(f"Unmount [{cls.MOUNT_POINT}]")
        ShellRunner.run(f"umount {cls.MOUNT_POINT}")
        Shell.run(f"umount {cls.MOUNT_POINT}")


class RepoCodenames(metaclass=WithIter):
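The ShellRunner to Shell migration also changes the error-handling contract: judging by the call sites above, Shell.run returns the command's output and swallows failures unless check=True, in which case a non-zero exit raises. A rough Python equivalent of that assumed contract (not the actual ci_utils implementation):

    import subprocess

    def run(cmd, check=False):
        # Run a shell command; return stdout, raising only when check=True.
        proc = subprocess.run(cmd, shell=True, capture_output=True, text=True, check=False)
        if check and proc.returncode != 0:
            raise RuntimeError(f"Command failed [{cmd}]: {proc.stderr.strip()}")
        return proc.stdout.strip() if proc.returncode == 0 else ""

    print(run("echo ok"))        # "ok"
    print(run("false") == "")    # True: failure is swallowed without check=True
    # run("false", check=True)   # would raise RuntimeError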
@ -124,8 +130,8 @@ class DebianArtifactory:
        cmd = f"{REPREPRO_CMD_PREFIX} includedeb {self.codename} {' '.join(paths)}"
        print("Running export command:")
        print(f"  {cmd}")
        ShellRunner.run(cmd)
        ShellRunner.run("sync")
        Shell.run(cmd, check=True)
        Shell.run("sync")

        if self.codename == RepoCodenames.LTS:
            packages_with_version = [
@ -137,16 +143,20 @@ class DebianArtifactory:
            cmd = f"{REPREPRO_CMD_PREFIX} copy {RepoCodenames.STABLE} {RepoCodenames.LTS} {' '.join(packages_with_version)}"
            print("Running copy command:")
            print(f"  {cmd}")
            ShellRunner.run(cmd)
            ShellRunner.run("sync")
            Shell.run(cmd, check=True)
            Shell.run("sync")

    def test_packages(self):
        ShellRunner.run("docker pull ubuntu:latest")
        Shell.run("docker pull ubuntu:latest")
        print(f"Test packages installation, version [{self.version}]")
        cmd = f"docker run --rm ubuntu:latest bash -c \"apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-client={self.version}\""
        debian_command = f"echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-common-static={self.version} clickhouse-client={self.version}"
        cmd = f'docker run --rm ubuntu:latest bash -c "apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; {debian_command}"'
        print("Running test command:")
        print(f"  {cmd}")
        ShellRunner.run(cmd)
        Shell.run(cmd, check=True)
        release_info = ReleaseInfo.from_file()
        release_info.debian_command = debian_command
        release_info.dump()


def _copy_if_not_exists(src: Path, dst: Path) -> Path:
@ -202,23 +212,27 @@ class RpmArtifactory:
        for command in commands:
            print("Running command:")
            print(f"  {command}")
            ShellRunner.run(command)
            Shell.run(command, check=True)

        update_public_key = f"gpg --armor --export {self._SIGN_KEY}"
        pub_key_path = dest_dir / "repodata" / "repomd.xml.key"
        print("Updating repomd.xml.key")
        pub_key_path.write_text(ShellRunner.run(update_public_key)[1])
        pub_key_path.write_text(Shell.run(update_public_key, check=True))
        if codename == RepoCodenames.LTS:
            self.export_packages(RepoCodenames.STABLE)
        ShellRunner.run("sync")
        Shell.run("sync")

    def test_packages(self):
        ShellRunner.run("docker pull fedora:latest")
        Shell.run("docker pull fedora:latest")
        print(f"Test package installation, version [{self.version}]")
        cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1"'
        rpm_command = f"dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1"
        cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command}"'
        print("Running test command:")
        print(f"  {cmd}")
        ShellRunner.run(cmd)
        Shell.run(cmd, check=True)
        release_info = ReleaseInfo.from_file()
        release_info.rpm_command = rpm_command
        release_info.dump()


class TgzArtifactory:
@ -256,23 +270,29 @@ class TgzArtifactory:

        if codename == RepoCodenames.LTS:
            self.export_packages(RepoCodenames.STABLE)
        ShellRunner.run("sync")
        Shell.run("sync")

    def test_packages(self):
        tgz_file = "/tmp/tmp.tgz"
        tgz_sha_file = "/tmp/tmp.tgz.sha512"
        ShellRunner.run(
            f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz"
        cmd = f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz"
        Shell.run(
            cmd,
            check=True,
        )
        ShellRunner.run(
            f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512"
        Shell.run(
            f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512",
            check=True,
        )
        expected_checksum = ShellRunner.run(f"cut -d ' ' -f 1 {tgz_sha_file}")
        actual_checksum = ShellRunner.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1")
        expected_checksum = Shell.run(f"cut -d ' ' -f 1 {tgz_sha_file}", check=True)
        actual_checksum = Shell.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1")
        assert (
            expected_checksum == actual_checksum
        ), f"[{actual_checksum} != {expected_checksum}]"
        ShellRunner.run("rm /tmp/tmp.tgz*")
        Shell.run("rm /tmp/tmp.tgz*")
        release_info = ReleaseInfo.from_file()
        release_info.tgz_command = cmd
        release_info.dump()
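The tgz test verifies the download by comparing the published .sha512 against a locally computed digest via shell pipelines. For reference, the same check done natively with Python's standard library (same file names as above):

    import hashlib

    def sha512_of(path):
        h = hashlib.sha512()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()

    def verify(tgz_file="/tmp/tmp.tgz", sha_file="/tmp/tmp.tgz.sha512"):
        with open(sha_file, encoding="utf-8") as f:
            expected = f.read().split()[0]  # "<digest> <filename>" format
        actual = sha512_of(tgz_file)
        assert expected == actual, f"[{actual} != {expected}]"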
def parse_args() -> argparse.Namespace:
@ -280,12 +300,6 @@ def parse_args() -> argparse.Namespace:
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Adds release packages to the repository",
    )
    parser.add_argument(
        "--infile",
        type=str,
        required=True,
        help="input file with release info",
    )
    parser.add_argument(
        "--export-debian",
        action="store_true",
@ -328,7 +342,7 @@ if __name__ == "__main__":
    args = parse_args()
    assert args.dry_run

    release_info = ReleaseInfo.from_file(args.infile)
    release_info = ReleaseInfo.from_file()
    """
    Use S3FS. RCLONE has some errors with r2 remote which I didn't figure out how to resolve:
    ERROR : IO error: NotImplemented: versionId not implemented
@ -336,20 +350,26 @@ if __name__ == "__main__":
    """
    mp = R2MountPoint(MountPointApp.S3FS, dry_run=args.dry_run)
    if args.export_debian:
        mp.init()
        DebianArtifactory(release_info, dry_run=args.dry_run).export_packages()
        mp.teardown()
        with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_DEB) as _:
            mp.init()
            DebianArtifactory(release_info, dry_run=args.dry_run).export_packages()
            mp.teardown()
    if args.export_rpm:
        mp.init()
        RpmArtifactory(release_info, dry_run=args.dry_run).export_packages()
        mp.teardown()
        with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_RPM) as _:
            mp.init()
            RpmArtifactory(release_info, dry_run=args.dry_run).export_packages()
            mp.teardown()
    if args.export_tgz:
        mp.init()
        TgzArtifactory(release_info, dry_run=args.dry_run).export_packages()
        mp.teardown()
        with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_TGZ) as _:
            mp.init()
            TgzArtifactory(release_info, dry_run=args.dry_run).export_packages()
            mp.teardown()
    if args.test_debian:
        DebianArtifactory(release_info, dry_run=args.dry_run).test_packages()
        with ReleaseContextManager(release_progress=ReleaseProgress.TEST_DEB) as _:
            DebianArtifactory(release_info, dry_run=args.dry_run).test_packages()
    if args.test_tgz:
        TgzArtifactory(release_info, dry_run=args.dry_run).test_packages()
        with ReleaseContextManager(release_progress=ReleaseProgress.TEST_TGZ) as _:
            TgzArtifactory(release_info, dry_run=args.dry_run).test_packages()
    if args.test_rpm:
        RpmArtifactory(release_info, dry_run=args.dry_run).test_packages()
        with ReleaseContextManager(release_progress=ReleaseProgress.TEST_RPM) as _:
            RpmArtifactory(release_info, dry_run=args.dry_run).test_packages()
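Each step of the __main__ block is now wrapped in a ReleaseContextManager keyed by a ReleaseProgress stage. The real implementation lives in create_release.py; a hypothetical minimal version, just to show the pattern of recording a stage as started on enter and completed (or failed) on exit:

    import json

    class ReleaseContextManager:
        def __init__(self, release_progress, path="/tmp/release_info.json"):
            self.progress = release_progress
            self.path = path

        def __enter__(self):
            self._write("started")
            return self

        def __exit__(self, exc_type, exc, tb):
            self._write("failed" if exc_type else "completed")
            return False  # never swallow exceptions

        def _write(self, status):
            with open(self.path, "w", encoding="utf-8") as f:
                json.dump({"progress": self.progress, "status": status}, f, indent=2)

    with ReleaseContextManager(release_progress="export_deb") as _:
        pass  # the export step would run here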
@ -1,17 +1,17 @@
import argparse
from datetime import timedelta, datetime
import logging
import dataclasses
import json
import os
from commit_status_helper import get_commit_filtered_statuses
import sys
from typing import List

from get_robot_token import get_best_robot_token
from github_helper import GitHub
from release import Release, Repo as ReleaseRepo, RELEASE_READY_STATUS
from ci_utils import Shell
from env_helper import GITHUB_REPOSITORY
from report import SUCCESS
from ssh import SSHKey

LOGGER_NAME = __name__
HELPER_LOGGERS = ["github_helper", LOGGER_NAME]
logger = logging.getLogger(LOGGER_NAME)
from ci_buddy import CIBuddy
from ci_config import CI
def parse_args():
@ -21,120 +21,198 @@ def parse_args():
    )
    parser.add_argument("--token", help="GitHub token, if not set, used from smm")
    parser.add_argument(
        "--repo", default="ClickHouse/ClickHouse", help="Repo owner/name"
    )
    parser.add_argument("--dry-run", action="store_true", help="Do not create anything")
    parser.add_argument(
        "--release-after-days",
        type=int,
        default=3,
        help="Do automatic release on the latest green commit after the latest "
        "release if the newest release is older than the specified days",
    )
    parser.add_argument(
        "--debug-helpers",
        "--post-status",
        action="store_true",
        help="Add debug logging for this script and github_helper",
        help="Post release branch statuses",
    )
    parser.add_argument(
        "--remote-protocol",
        "-p",
        default="ssh",
        choices=ReleaseRepo.VALID,
        help="repo protocol for git commands remote, 'origin' is a special case and "
        "uses 'origin' as a remote",
        "--post-auto-release-complete",
        action="store_true",
        help="Post autorelease completion status",
    )
    parser.add_argument(
        "--prepare",
        action="store_true",
        help="Prepare autorelease info",
    )
    parser.add_argument(
        "--wf-status",
        type=str,
        default="",
        help="overall workflow status [success|failure]",
    )
    return parser.parse_args(), parser

    return parser.parse_args()

MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE = 5
AUTORELEASE_INFO_FILE = "/tmp/autorelease_info.json"
@dataclasses.dataclass
class ReleaseParams:
    ready: bool
    ci_status: str
    num_patches: int
    release_branch: str
    commit_sha: str
    commits_to_branch_head: int
    latest: bool

    def to_dict(self):
        return dataclasses.asdict(self)


@dataclasses.dataclass
class AutoReleaseInfo:
    releases: List[ReleaseParams]

    def add_release(self, release_params: ReleaseParams) -> None:
        self.releases.append(release_params)

    def dump(self):
        print(f"Dump release info into [{AUTORELEASE_INFO_FILE}]")
        with open(AUTORELEASE_INFO_FILE, "w", encoding="utf-8") as f:
            print(json.dumps(dataclasses.asdict(self), indent=2), file=f)

    @staticmethod
    def from_file() -> "AutoReleaseInfo":
        with open(AUTORELEASE_INFO_FILE, "r", encoding="utf-8") as json_file:
            res = json.load(json_file)
        releases = [ReleaseParams(**release) for release in res["releases"]]
        return AutoReleaseInfo(releases=releases)
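AutoReleaseInfo is persisted as JSON so that separate workflow steps (--prepare, --post-status) can share state across processes. A compact round-trip of the same dataclass pattern, trimmed to two fields:

    import dataclasses
    import json

    @dataclasses.dataclass
    class Params:
        ready: bool
        release_branch: str

    @dataclasses.dataclass
    class Info:
        releases: list

        def dump(self, path):
            with open(path, "w", encoding="utf-8") as f:
                json.dump(dataclasses.asdict(self), f, indent=2)

        @staticmethod
        def from_file(path):
            with open(path, encoding="utf-8") as f:
                res = json.load(f)
            return Info([Params(**r) for r in res["releases"]])

    Info([Params(ready=True, release_branch="24.6")]).dump("/tmp/autorelease_demo.json")
    print(Info.from_file("/tmp/autorelease_demo.json").releases[0].ready)  # True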
def _prepare(token):
    assert len(token) > 10
    os.environ["GH_TOKEN"] = token
    Shell.run("gh auth status", check=True)

    gh = GitHub(token)
    prs = gh.get_release_pulls(GITHUB_REPOSITORY)
    prs.sort(key=lambda x: x.head.ref)
    branch_names = [pr.head.ref for pr in prs]
    print(f"Found release branches [{branch_names}]")

    repo = gh.get_repo(GITHUB_REPOSITORY)
    autoRelease_info = AutoReleaseInfo(releases=[])

    for pr in prs:
        print(f"\nChecking PR [{pr.head.ref}]")

        refs = list(repo.get_git_matching_refs(f"tags/v{pr.head.ref}"))
        assert refs

        refs.sort(key=lambda ref: ref.ref)
        latest_release_tag_ref = refs[-1]
        latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha)

        commits = Shell.run(
            f"git rev-list --first-parent {latest_release_tag.tag}..origin/{pr.head.ref}",
            check=True,
        ).split("\n")
        commit_num = len(commits)
        print(
            f"Previous release [{latest_release_tag.tag}] was [{commit_num}] commits ago, date [{latest_release_tag.tagger.date}]"
        )

        commits_to_check = commits[:-1]  # Exclude the version bump commit
        commit_sha = ""
        commit_ci_status = ""
        commits_to_branch_head = 0

        for idx, commit in enumerate(
            commits_to_check[:MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE]
        ):
            print(
                f"Check commit [{commit}] [{pr.head.ref}~{idx+1}] as release candidate"
            )
            commit_num -= 1

            is_completed = CI.GHActions.check_wf_completed(
                token=token, commit_sha=commit
            )
            if not is_completed:
                print(f"CI is in progress for [{commit}] - check previous commit")
                commits_to_branch_head += 1
                continue

            commit_ci_status = CI.GHActions.get_commit_status_by_name(
                token=token,
                commit_sha=commit,
                status_name=(CI.JobNames.BUILD_CHECK, "ClickHouse build check"),
            )
            commit_sha = commit
            if commit_ci_status == SUCCESS:
                break

            print(f"CI status [{commit_ci_status}] - skip")
            commits_to_branch_head += 1

        ready = False
        if commit_ci_status == SUCCESS and commit_sha:
            print(
                f"Add release ready info for commit [{commit_sha}] and release branch [{pr.head.ref}]"
            )
            ready = True
        else:
            print(f"WARNING: No ready commits found for release branch [{pr.head.ref}]")

        autoRelease_info.add_release(
            ReleaseParams(
                release_branch=pr.head.ref,
                commit_sha=commit_sha,
                ready=ready,
                ci_status=commit_ci_status,
                num_patches=commit_num,
                commits_to_branch_head=commits_to_branch_head,
                latest=False,
            )
        )

    if autoRelease_info.releases:
        autoRelease_info.releases[-1].latest = True

    autoRelease_info.dump()
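The candidate-selection loop in _prepare walks back from the branch head, skipping commits whose CI is still running or not green, and settles on the first success among the last five commits. The scan reduced to its essentials (the status lookup stands in for the GH API calls):

    MAX_COMMITS = 5

    def find_release_commit(commits, ci_status):
        # commits: newest-first SHAs; ci_status: sha -> "success"/"failure"/"pending".
        for idx, sha in enumerate(commits[:MAX_COMMITS]):
            if ci_status(sha) == "success":
                return sha, idx  # idx commits sit between the head and the candidate
        return None, None

    statuses = {"c1": "pending", "c2": "failure", "c3": "success"}
    print(find_release_commit(["c1", "c2", "c3"], statuses.get))  # ('c3', 2)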
def main():
    args = parse_args()
    logging.basicConfig(level=logging.INFO)
    if args.debug_helpers:
        for logger_name in HELPER_LOGGERS:
            logging.getLogger(logger_name).setLevel(logging.DEBUG)
    args, parser = parse_args()

    token = args.token or get_best_robot_token()
    days_as_timedelta = timedelta(days=args.release_after_days)
    now = datetime.now()

    gh = GitHub(token)
    prs = gh.get_release_pulls(args.repo)
    branch_names = [pr.head.ref for pr in prs]

    logger.info("Found release branches: %s\n ", " \n".join(branch_names))
    repo = gh.get_repo(args.repo)

    # In general there is no guarantee on which order the refs/commits are
    # returned from the API, so we have to order them.
    for pr in prs:
        logger.info("Checking PR %s", pr.head.ref)

        refs = list(repo.get_git_matching_refs(f"tags/v{pr.head.ref}"))
        refs.sort(key=lambda ref: ref.ref)

        latest_release_tag_ref = refs[-1]
        latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha)
        logger.info("That last release was done at %s", latest_release_tag.tagger.date)

        if latest_release_tag.tagger.date + days_as_timedelta > now:
            logger.info(
                "Not enough days since the last release %s,"
                " no automatic release can be done",
                latest_release_tag.tag,
    if args.post_status:
        info = AutoReleaseInfo.from_file()
        for release_info in info.releases:
            if release_info.ready:
                CIBuddy(dry_run=False).post_info(
                    title=f"Auto Release Status for {release_info.release_branch}",
                    body=release_info.to_dict(),
                )
            else:
                CIBuddy(dry_run=False).post_warning(
                    title=f"Auto Release Status for {release_info.release_branch}",
                    body=release_info.to_dict(),
                )
    if args.post_auto_release_complete:
        assert args.wf_status, "--wf-status Required with --post-auto-release-complete"
        if args.wf_status != SUCCESS:
            CIBuddy(dry_run=False).post_job_error(
                error_description="Autorelease workflow failed",
                job_name="Autorelease",
                with_instance_info=False,
                with_wf_link=True,
                critical=True,
            )
            continue

        unreleased_commits = list(
            repo.get_commits(sha=pr.head.ref, since=latest_release_tag.tagger.date)
        )
        unreleased_commits.sort(
            key=lambda commit: commit.commit.committer.date, reverse=True
        )

        for commit in unreleased_commits:
            logger.info("Checking statuses of commit %s", commit.sha)
            statuses = get_commit_filtered_statuses(commit)
            all_success = all(st.state == SUCCESS for st in statuses)
            passed_ready_for_release_check = any(
                st.context == RELEASE_READY_STATUS and st.state == SUCCESS
                for st in statuses
        else:
            CIBuddy(dry_run=False).post_info(
                title=f"Autorelease completed",
                body="",
                with_wf_link=True,
            )
            if not (all_success and passed_ready_for_release_check):
                logger.info("Commit is not green, thus not suitable for release")
                continue

            logger.info("Commit is ready for release, let's release!")

            release = Release(
                ReleaseRepo(args.repo, args.remote_protocol),
                commit.sha,
                "patch",
                args.dry_run,
                True,
            )
            try:
                release.do(True, True, True)
            except:
                if release.has_rollback:
                    logging.error(
                        "!!The release process finished with error, read the output carefully!!"
                    )
                    logging.error(
                        "Probably, rollback finished with error. "
                        "If you don't see any of the following commands in the output, "
                        "execute them manually:"
                    )
                    release.log_rollback()
                raise
            logging.info("New release is done!")
            break
    elif args.prepare:
        _prepare(token=args.token or get_best_robot_token())
    else:
        parser.print_help()
        sys.exit(2)


if __name__ == "__main__":
    if os.getenv("ROBOT_CLICKHOUSE_SSH_KEY", ""):
        with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"):
            main()
    else:
        main()
    main()
@ -1110,13 +1110,14 @@ def main() -> int:
        ci_cache.print_status()

        if IS_CI and not pr_info.is_merge_queue:
            # wait for pending jobs to be finished, await_jobs is a long blocking call
            ci_cache.await_pending_jobs(pr_info.is_release)

            if pr_info.is_release:
                print("Release/master: CI Cache add pending records for all todo jobs")
                ci_cache.push_pending_all(pr_info.is_release)

            # wait for pending jobs to be finished, await_jobs is a long blocking call
            ci_cache.await_pending_jobs(pr_info.is_release)

        # conclude results
        result["git_ref"] = git_ref
        result["version"] = version
@ -1292,10 +1293,11 @@ def main() -> int:
            pass
        if Utils.is_killed_with_oom():
            print("WARNING: OOM while job execution")
            print(subprocess.run("sudo dmesg -T", check=False))
            error_description = f"Out Of Memory, exit_code {job_report.exit_code}"
        else:
            error_description = f"Unknown, exit_code {job_report.exit_code}"
        CIBuddy().post_error(
        CIBuddy().post_job_error(
            error_description + f" after {int(job_report.duration)}s",
            job_name=_get_ext_check_name(args.job_name),
        )
@ -1,5 +1,6 @@
import json
import os
from typing import Union, Dict

import boto3
import requests
@ -60,14 +61,64 @@ class CIBuddy:
        except Exception as e:
            print(f"ERROR: Failed to post message, ex {e}")

    def post_error(self, error_description, job_name="", with_instance_info=True):
    def _post_formatted(
        self, title: str, body: Union[Dict, str], with_wf_link: bool
    ) -> None:
        message = title
        if isinstance(body, dict):
            for name, value in body.items():
                if "commit_sha" in name:
                    value = (
                        f"<https://github.com/{self.repo}/commit/{value}|{value[:8]}>"
                    )
                message += f" *{name}*: {value}\n"
        else:
            message += body + "\n"
        run_id = os.getenv("GITHUB_RUN_ID", "")
        if with_wf_link and run_id:
            message += f" *workflow*: <https://github.com/{self.repo}/actions/runs/{run_id}|{run_id}>\n"
        self.post(message)
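_post_formatted renders a dict body into Slack-style markup and linkifies any field whose name contains commit_sha. The formatting rule in isolation, runnable on its own (repo name illustrative):

    def format_body(title, body, repo="ClickHouse/ClickHouse"):
        message = title
        for name, value in body.items():
            if "commit_sha" in name:
                value = f"<https://github.com/{repo}/commit/{value}|{value[:8]}>"
            message += f" *{name}*: {value}\n"
        return message

    print(format_body(
        ":white_circle: *Auto Release Status*\n\n",
        {"release_branch": "24.6", "commit_sha": "2a68867a20deadbeef"},
    ))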
    def post_info(
        self, title: str, body: Union[Dict, str], with_wf_link: bool = True
    ) -> None:
        title_extended = f":white_circle: *{title}*\n\n"
        self._post_formatted(title_extended, body, with_wf_link)

    def post_done(
        self, title: str, body: Union[Dict, str], with_wf_link: bool = True
    ) -> None:
        title_extended = f":white_check_mark: *{title}*\n\n"
        self._post_formatted(title_extended, body, with_wf_link)

    def post_warning(
        self, title: str, body: Union[Dict, str], with_wf_link: bool = True
    ) -> None:
        title_extended = f":warning: *{title}*\n\n"
        self._post_formatted(title_extended, body, with_wf_link)

    def post_critical(
        self, title: str, body: Union[Dict, str], with_wf_link: bool = True
    ) -> None:
        title_extended = f":black_circle: *{title}*\n\n"
        self._post_formatted(title_extended, body, with_wf_link)

    def post_job_error(
        self,
        error_description: str,
        job_name: str = "",
        with_instance_info: bool = True,
        with_wf_link: bool = True,
        critical: bool = False,
    ) -> None:
        instance_id, instance_type = "unknown", "unknown"
        if with_instance_info:
            instance_id = Shell.run("ec2metadata --instance-id") or instance_id
            instance_type = Shell.run("ec2metadata --instance-type") or instance_type
        if not job_name:
            job_name = os.getenv("CHECK_NAME", "unknown")
        line_err = f":red_circle: *Error: {error_description}*\n\n"
        sign = ":red_circle:" if not critical else ":black_circle:"
        line_err = f"{sign} *Error: {error_description}*\n\n"
        line_ghr = f" *Runner:* `{instance_type}`, `{instance_id}`\n"
        line_job = f" *Job:* `{job_name}`\n"
        line_pr_ = f" *PR:* <https://github.com/{self.repo}/pull/{self.pr_number}|#{self.pr_number}>, <{self.commit_url}|{self.sha}>\n"
@ -82,10 +133,13 @@ class CIBuddy:
            message += line_pr_
        else:
            message += line_br_
        run_id = os.getenv("GITHUB_RUN_ID", "")
        if with_wf_link and run_id:
            message += f" *workflow*: <https://github.com/{self.repo}/actions/runs/{run_id}|{run_id}>\n"
        self.post(message)


if __name__ == "__main__":
    # test
    buddy = CIBuddy(dry_run=True)
    buddy.post_error("TEst")
    buddy.post_job_error("TEst")
@ -638,7 +638,14 @@ class CiCache:
        pushes pending records for all jobs that supposed to be run
        """
        for job, job_config in self.jobs_to_do.items():
            if not job_config.has_digest():
            if (
                job in self.jobs_to_wait
                or not job_config.has_digest()
                or job_config.disable_await
            ):
                # 1. "job in self.jobs_to_wait" - this job already has a pending record in cache
                # 2. "not job_config.has_digest()" - cache is not used for these jobs
                # 3. "job_config.disable_await" - await is explicitly disabled
                continue
            pending_state = PendingState(time.time(), run_url=GITHUB_RUN_URL)
            assert job_config.batches
@ -708,7 +715,7 @@ class CiCache:
        Filter is to be applied in PRs to remove jobs that are not affected by the change
        :return:
        """
        remove_from_to_do = []
        remove_from_workflow = []
        required_builds = []
        has_test_jobs_to_skip = False
        for job_name, job_config in self.jobs_to_do.items():
@ -723,26 +730,41 @@ class CiCache:
                    job=reference_name,
                    job_config=reference_config,
                ):
                    remove_from_to_do.append(job_name)
                    remove_from_workflow.append(job_name)
                    has_test_jobs_to_skip = True
                else:
                    required_builds += (
                        job_config.required_builds if job_config.required_builds else []
                    )
        if has_test_jobs_to_skip:
            # If there are tests to skip, it means build digest has not been changed.
            # If there are tests to skip, it means builds are not affected as well.
            # No need to test builds. Let's keep all builds required for test jobs and skip the others
            for job_name, job_config in self.jobs_to_do.items():
                if CI.is_build_job(job_name):
                    if job_name not in required_builds:
                        remove_from_to_do.append(job_name)
                        remove_from_workflow.append(job_name)

        for job in remove_from_to_do:
        for job in remove_from_workflow:
            print(f"Filter job [{job}] - not affected by the change")
            if job in self.jobs_to_do:
                del self.jobs_to_do[job]
            if job in self.jobs_to_wait:
                del self.jobs_to_wait[job]
            if job in self.jobs_to_skip:
                self.jobs_to_skip.remove(job)

        # special handling for the special job: BUILD_CHECK
        has_builds = False
        for job in list(self.jobs_to_do) + self.jobs_to_skip:
            if CI.is_build_job(job):
                has_builds = True
                break
        if not has_builds:
            if CI.JobNames.BUILD_CHECK in self.jobs_to_do:
                print(
                    f"Filter job [{CI.JobNames.BUILD_CHECK}] - no builds are required in the workflow"
                )
                del self.jobs_to_do[CI.JobNames.BUILD_CHECK]
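The reworked filter proceeds in three passes: drop test jobs whose digests hit the cache, then prune builds that no surviving test requires, and finally drop the aggregate Builds report if no build job is left. The control flow as plain Python (a sketch of the logic, not the real CiCache state):

    def filter_unaffected(jobs, is_cached, required_builds_of, is_build):
        required = set()
        for name in [n for n in jobs if not is_build(n) and n != "Builds"]:
            if is_cached(name):
                del jobs[name]           # test job not affected by the change
            else:
                required.update(required_builds_of(name))
        for name in [n for n in jobs if is_build(n)]:
            if name not in required:
                del jobs[name]           # build no surviving test depends on
        if not any(is_build(n) for n in jobs):
            jobs.pop("Builds", None)     # aggregate report needs at least one build
        return jobs

    jobs = {"package_release": {}, "Builds": {}, "Stateless tests (release)": {}}
    print(filter_unaffected(
        jobs,
        is_cached=lambda n: True,  # everything cached -> everything filtered out
        required_builds_of=lambda n: ["package_release"],
        is_build=lambda n: n.startswith(("package_", "binary_")),
    ))  # {}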
    def await_pending_jobs(self, is_release: bool, dry_run: bool = False) -> None:
        """
@ -884,3 +906,87 @@ class CiCache:
                    self.jobs_to_wait[job] = job_config

        return self


if __name__ == "__main__":
    # for testing
    job_digest = {
        "package_release": "bbbd3519d1",
        "package_aarch64": "bbbd3519d1",
        "package_asan": "bbbd3519d1",
        "package_ubsan": "bbbd3519d1",
        "package_tsan": "bbbd3519d1",
        "package_msan": "bbbd3519d1",
        "package_debug": "bbbd3519d1",
        "package_release_coverage": "bbbd3519d1",
        "binary_release": "bbbd3519d1",
        "binary_tidy": "bbbd3519d1",
        "binary_darwin": "bbbd3519d1",
        "binary_aarch64": "bbbd3519d1",
        "binary_aarch64_v80compat": "bbbd3519d1",
        "binary_freebsd": "bbbd3519d1",
        "binary_darwin_aarch64": "bbbd3519d1",
        "binary_ppc64le": "bbbd3519d1",
        "binary_amd64_compat": "bbbd3519d1",
        "binary_amd64_musl": "bbbd3519d1",
        "binary_riscv64": "bbbd3519d1",
        "binary_s390x": "bbbd3519d1",
        "binary_loongarch64": "bbbd3519d1",
        "Builds": "f5dffeecb8",
        "Install packages (release)": "ba0c89660e",
        "Install packages (aarch64)": "ba0c89660e",
        "Stateful tests (asan)": "32a9a1aba9",
        "Stateful tests (tsan)": "32a9a1aba9",
        "Stateful tests (msan)": "32a9a1aba9",
        "Stateful tests (ubsan)": "32a9a1aba9",
        "Stateful tests (debug)": "32a9a1aba9",
        "Stateful tests (release)": "32a9a1aba9",
        "Stateful tests (coverage)": "32a9a1aba9",
        "Stateful tests (aarch64)": "32a9a1aba9",
        "Stateful tests (release, ParallelReplicas)": "32a9a1aba9",
        "Stateful tests (debug, ParallelReplicas)": "32a9a1aba9",
        "Stateless tests (asan)": "deb6778b88",
        "Stateless tests (tsan)": "deb6778b88",
        "Stateless tests (msan)": "deb6778b88",
        "Stateless tests (ubsan)": "deb6778b88",
        "Stateless tests (debug)": "deb6778b88",
        "Stateless tests (release)": "deb6778b88",
        "Stateless tests (coverage)": "deb6778b88",
        "Stateless tests (aarch64)": "deb6778b88",
        "Stateless tests (release, old analyzer, s3, DatabaseReplicated)": "deb6778b88",
        "Stateless tests (debug, s3 storage)": "deb6778b88",
        "Stateless tests (tsan, s3 storage)": "deb6778b88",
        "Stress test (debug)": "aa298abf10",
        "Stress test (tsan)": "aa298abf10",
        "Upgrade check (debug)": "5ce4d3ee02",
        "Integration tests (asan, old analyzer)": "42e58be3aa",
        "Integration tests (tsan)": "42e58be3aa",
        "Integration tests (aarch64)": "42e58be3aa",
        "Integration tests flaky check (asan)": "42e58be3aa",
        "Compatibility check (release)": "ecb69d8c4b",
        "Compatibility check (aarch64)": "ecb69d8c4b",
        "Unit tests (release)": "09d00b702e",
        "Unit tests (asan)": "09d00b702e",
        "Unit tests (msan)": "09d00b702e",
        "Unit tests (tsan)": "09d00b702e",
        "Unit tests (ubsan)": "09d00b702e",
        "AST fuzzer (debug)": "c38ebf947f",
        "AST fuzzer (asan)": "c38ebf947f",
        "AST fuzzer (msan)": "c38ebf947f",
        "AST fuzzer (tsan)": "c38ebf947f",
        "AST fuzzer (ubsan)": "c38ebf947f",
        "Stateless tests flaky check (asan)": "deb6778b88",
        "Performance Comparison (release)": "a8a7179258",
        "ClickBench (release)": "45c07c4aa6",
        "ClickBench (aarch64)": "45c07c4aa6",
        "Docker server image": "6a24d5b187",
        "Docker keeper image": "6a24d5b187",
        "Docs check": "4764154c62",
        "Fast test": "cb269133f2",
        "Style check": "ffffffffff",
        "Stateful tests (ubsan, ParallelReplicas)": "32a9a1aba9",
        "Stress test (msan)": "aa298abf10",
        "Upgrade check (asan)": "5ce4d3ee02",
    }
    ci_cache = CiCache(job_digests=job_digest, cache_enabled=True, s3=S3Helper())
    ci_cache.update()
@ -32,6 +32,9 @@ class CI:
    from ci_definitions import MQ_JOBS as MQ_JOBS
    from ci_definitions import WorkflowStages as WorkflowStages
    from ci_definitions import Runners as Runners
    from ci_utils import Envs as Envs
    from ci_utils import Utils as Utils
    from ci_utils import GHActions as GHActions
    from ci_definitions import Labels as Labels
    from ci_definitions import TRUSTED_CONTRIBUTORS as TRUSTED_CONTRIBUTORS
    from ci_utils import CATEGORY_TO_LABEL as CATEGORY_TO_LABEL
@ -48,24 +51,14 @@ class CI:
                JobNames.INTEGRATION_TEST_ARM,
            ]
        ),
        Tags.CI_SET_REQUIRED: LabelConfig(run_jobs=REQUIRED_CHECKS),
        Tags.CI_SET_REQUIRED: LabelConfig(
            run_jobs=REQUIRED_CHECKS
            + [build for build in BuildNames if build != BuildNames.FUZZERS]
        ),
        Tags.CI_SET_BUILDS: LabelConfig(
            run_jobs=[JobNames.STYLE_CHECK, JobNames.BUILD_CHECK]
            + [build for build in BuildNames if build != BuildNames.FUZZERS]
        ),
        Tags.CI_SET_NON_REQUIRED: LabelConfig(
            run_jobs=[job for job in JobNames if job not in REQUIRED_CHECKS]
        ),
        Tags.CI_SET_OLD_ANALYZER: LabelConfig(
            run_jobs=[
                JobNames.STYLE_CHECK,
                JobNames.FAST_TEST,
                BuildNames.PACKAGE_RELEASE,
                BuildNames.PACKAGE_ASAN,
                JobNames.STATELESS_TEST_OLD_ANALYZER_S3_REPLICATED_RELEASE,
                JobNames.INTEGRATION_TEST_ASAN_OLD_ANALYZER,
            ]
        ),
        Tags.CI_SET_SYNC: LabelConfig(
            run_jobs=[
                BuildNames.PACKAGE_ASAN,
@ -320,13 +313,13 @@ class CI:
            required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2
        ),
        JobNames.STATELESS_TEST_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_TSAN], num_batches=2
            required_builds=[BuildNames.PACKAGE_TSAN], num_batches=4
        ),
        JobNames.STATELESS_TEST_MSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_MSAN], num_batches=3
            required_builds=[BuildNames.PACKAGE_MSAN], num_batches=4
        ),
        JobNames.STATELESS_TEST_UBSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_UBSAN], num_batches=1
            required_builds=[BuildNames.PACKAGE_UBSAN], num_batches=2
        ),
        JobNames.STATELESS_TEST_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=2
@ -335,24 +328,24 @@ class CI:
            required_builds=[BuildNames.PACKAGE_RELEASE],
        ),
        JobNames.STATELESS_TEST_RELEASE_COVERAGE: CommonJobConfigs.STATELESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_RELEASE_COVERAGE], num_batches=5
            required_builds=[BuildNames.PACKAGE_RELEASE_COVERAGE], num_batches=6
        ),
        JobNames.STATELESS_TEST_AARCH64: CommonJobConfigs.STATELESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_AARCH64],
            runner_type=Runners.FUNC_TESTER_ARM,
        ),
        JobNames.STATELESS_TEST_OLD_ANALYZER_S3_REPLICATED_RELEASE: CommonJobConfigs.STATELESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=3
            required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=4
        ),
        JobNames.STATELESS_TEST_S3_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=2
        ),
        JobNames.STATELESS_TEST_AZURE_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2, release_only=True
            required_builds=[BuildNames.PACKAGE_ASAN], num_batches=3, release_only=True
        ),
        JobNames.STATELESS_TEST_S3_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_TSAN],
            num_batches=3,
            num_batches=4,
        ),
        JobNames.STRESS_TEST_DEBUG: CommonJobConfigs.STRESS_TEST.with_properties(
            required_builds=[BuildNames.PACKAGE_DEBUG],