Commit 3c5569514c
Merge remote-tracking branch 'upstream/master' into interactive-metrics-table

.github/PULL_REQUEST_TEMPLATE.md (vendored, 3 changes)
@@ -59,6 +59,9 @@ At a minimum, the following information should be added (but add more as needed)
 - [ ] <!---ci_exclude_tsan|msan|ubsan|coverage--> Exclude: All with TSAN, MSAN, UBSAN, Coverage
 - [ ] <!---ci_exclude_aarch64|release|debug--> Exclude: All with aarch64, release, debug
 ---
+- [ ] <!---ci_include_fuzzer--> Run only fuzzers related jobs (libFuzzer fuzzers, AST fuzzers, etc.)
+- [ ] <!---ci_exclude_ast--> Exclude: AST fuzzers
+---
 - [ ] <!---do_not_test--> Do not test
 - [ ] <!---woolen_wolfdog--> Woolen Wolfdog
 - [ ] <!---upload_all--> Upload binaries for special builds

.github/actions/check_workflow/action.yml (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
+name: CheckWorkflowResults
+
+description: Check overall workflow status and post error to slack if any
+
+inputs:
+  needs:
+    description: github needs context as a json string
+    required: true
+    type: string
+
+runs:
+  using: "composite"
+  steps:
+    - name: Check Workflow
+      shell: bash
+      run: |
+        export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+        cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+        ${{ inputs.needs }}
+        EOF
+        python3 ./tests/ci/ci_buddy.py --check-wf-status
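Usage note: a caller passes its entire `needs` context (a JSON object mapping each upstream job to its result and outputs) serialized with `toJson`. A minimal sketch of the invocation, matching the step this commit adds to pull_request.yml further down:

    - name: Check Workflow results
      uses: ./.github/actions/check_workflow
      with:
        needs: ${{ toJson(needs) }}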

.github/actions/debug/action.yml (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
+name: DebugInfo
+description: Prints workflow debug info
+
+runs:
+  using: "composite"
+  steps:
+    - name: Print envs
+      shell: bash
+      run: |
+        echo "::group::Envs"
+        env
+        echo "::endgroup::"
+    - name: Print Event.json
+      shell: bash
+      run: |
+        echo "::group::Event.json"
+        python3 -m json.tool "$GITHUB_EVENT_PATH"
+        echo "::endgroup::"
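Usage note: this is invoked as a local composite action; auto_releases.yml below adds it as its first step. The `::group::`/`::endgroup::` workflow commands make the runner's log viewer collapse the potentially long `env` and event-payload dumps:

    - name: Debug Info
      uses: ./.github/actions/debug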

.github/workflows/auto_release.yml (vendored, deleted, 45 lines)
@@ -1,45 +0,0 @@
-name: AutoRelease
-
-env:
-  # Force the stdout and stderr streams to be unbuffered
-  PYTHONUNBUFFERED: 1
-
-concurrency:
-  group: auto-release
-on: # yamllint disable-line rule:truthy
-  # schedule:
-  #   - cron: '0 10-16 * * 1-5'
-  workflow_dispatch:
-
-jobs:
-  CherryPick:
-    runs-on: [self-hosted, style-checker-aarch64]
-    steps:
-      - name: Set envs
-        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/cherry_pick
-          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
-          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
-          RCSK
-          REPO_OWNER=ClickHouse
-          REPO_NAME=ClickHouse
-          REPO_TEAM=core
-          EOF
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
-          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
-          fetch-depth: 0
-      - name: Auto-release
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 auto_release.py --release-after-days=3
-      - name: Cleanup
-        if: always()
-        run: |
-          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-          sudo rm -fr "$TEMP_PATH"

.github/workflows/auto_releases.yml (vendored, new file, 109 lines)
@@ -0,0 +1,109 @@
+name: AutoReleases
+
+env:
+  PYTHONUNBUFFERED: 1
+
+concurrency:
+  group: autoreleases
+
+on:
+  # schedule:
+  #   - cron: '0 9 * * *'
+  workflow_dispatch:
+    inputs:
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: true
+        type: boolean
+
+jobs:
+  AutoReleaseInfo:
+    runs-on: [self-hosted, style-checker-aarch64]
+    outputs:
+      data: ${{ steps.info.outputs.AUTO_RELEASE_PARAMS }}
+      dry_run: ${{ steps.info.outputs.DRY_RUN }}
+    steps:
+      - name: Debug Info
+        uses: ./.github/actions/debug
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
+          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
+          RCSK
+          EOF
+          echo "DRY_RUN=true" >> "$GITHUB_ENV"
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+      - name: Prepare Info
+        id: info
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 auto_release.py --prepare
+          echo "::group::Auto Release Info"
+          python3 -m json.tool /tmp/autorelease_info.json
+          echo "::endgroup::"
+          {
+            echo 'AUTO_RELEASE_PARAMS<<EOF'
+            cat /tmp/autorelease_info.json
+            echo 'EOF'
+          } >> "$GITHUB_ENV"
+          {
+            echo 'AUTO_RELEASE_PARAMS<<EOF'
+            cat /tmp/autorelease_info.json
+            echo 'EOF'
+          } >> "$GITHUB_OUTPUT"
+          echo "DRY_RUN=true" >> "$GITHUB_OUTPUT"
+      - name: Post Release Branch statuses
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 auto_release.py --post-status
+      - name: Clean up
+        uses: ./.github/actions/clean
+
+  Release_0:
+    needs: AutoReleaseInfo
+    name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].release_branch }}
+    if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].ready }}
+    uses: ./.github/workflows/create_release.yml
+    with:
+      ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].commit_sha }}
+      type: patch
+      dry-run: ${{ needs.AutoReleaseInfo.outputs.dry_run }}
+#
+#  Release_1:
+#    needs: [AutoReleaseInfo, Release_0]
+#    name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].release_branch }}
+#    if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].ready }}
+#    uses: ./.github/workflows/create_release.yml
+#    with:
+#      ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].commit_sha }}
+#      type: patch
+#      dry-run: ${{ env.DRY_RUN }}
+#
+#  Release_2:
+#    needs: [AutoReleaseInfo, Release_1]
+#    name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[2].release_branch }}
+#    if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[2].ready }}
+#    uses: ./.github/workflow/create_release.yml
+#    with:
+#      ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].commit_sha }}
+#      type: patch
+#      dry-run: ${{ env.DRY_RUN }}
+#
+#  Release_3:
+#    needs: [AutoReleaseInfo, Release_2]
+#    name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].release_branch }}
+#    if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].ready }}
+#    uses: ./.github/workflow/create_release.yml
+#    with:
+#      ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].commit_sha }}
+#      type: patch
+#      dry-run: ${{ env.DRY_RUN }}
+
+#  - name: Post Slack Message
+#    if: ${{ !cancelled() }}
+#    run: |
+#      cd "$GITHUB_WORKSPACE/tests/ci"
+#      python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
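Technical note: `KEY<<DELIM ... DELIM` written into `$GITHUB_ENV` or `$GITHUB_OUTPUT` is the documented GitHub Actions syntax for a multiline value; the brace group above is what turns the whole autorelease JSON into the single job output `data`. A minimal sketch of the pattern with illustrative names:

    {
      echo 'MY_PARAMS<<EOF'         # open the multiline value
      cat /tmp/some_payload.json    # the value itself, any number of lines
      echo 'EOF'                    # close it
    } >> "$GITHUB_OUTPUT"           # readable later as steps.<step_id>.outputs.MY_PARAMS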

.github/workflows/backport_branches.yml (vendored, 18 changes)
@@ -241,8 +241,9 @@ jobs:
       runner_type: stress-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
     needs:
+      - RunConfig
       - Builds_Report
       - FunctionalStatelessTestAsan
       - FunctionalStatefulTestDebug
@@ -257,10 +258,23 @@ jobs:
         with:
           clear-repository: true
       - name: Finish label
+        if: ${{ !failure() }}
         run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
          cd "$GITHUB_WORKSPACE/tests/ci"
          # update mergeable check
-          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+          python3 merge_pr.py --set-ci-status
          # update overall ci report
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          python3 merge_pr.py
+      - name: Check Workflow results
+        if: ${{ !cancelled() }}
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
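Technical note: GitHub Actions expressions have no ternary operator, so `cond && a || b` serves as one, and `needs.*.result` is an object filter that collects every dependency's result into an array for `contains`. The recurring `--wf-status` computation in isolation (hypothetical step name):

    - name: Compute workflow status
      run: echo "wf-status=${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}"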

.github/workflows/create_release.yml (vendored, 154 changes)
@@ -17,10 +17,35 @@ concurrency:
         options:
           - patch
           - new
+      only-repo:
+        description: 'Run only repos updates including docker (repo-recovery, tests)'
+        required: false
+        default: false
+        type: boolean
       dry-run:
         description: 'Dry run'
         required: false
-        default: true
+        default: false
         type: boolean
+  workflow_call:
+    inputs:
+      ref:
+        description: 'Git reference (branch or commit sha) from which to create the release'
+        required: true
+        type: string
+      type:
+        description: 'The type of release: "new" for a new release or "patch" for a patch release'
+        required: true
+        type: string
+      only-repo:
+        description: 'Run only repos updates including docker (repo-recovery, tests)'
+        required: false
+        default: false
+        type: boolean
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: false
+        type: boolean
 
 jobs:
@@ -31,54 +56,62 @@ jobs:
     steps:
       - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
-      - name: Set envs
-        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
-          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
-          RCSK
-          RELEASE_INFO_FILE=${{ runner.temp }}/release_info.json
-          EOF
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
       - name: Prepare Release Info
+        shell: bash
         run: |
+          if [ ${{ inputs.only-repo }} == "true" ]; then
+              git tag -l ${{ inputs.ref }} || { echo "With only-repo option ref must be a valid release tag"; exit 1; }
+          fi
          python3 ./tests/ci/create_release.py --prepare-release-info \
              --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
-              --outfile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+              ${{ inputs.dry-run == true && '--dry-run' || '' }} \
+              ${{ inputs.only-repo == true && '--skip-tag-check' || '' }}
          echo "::group::Release Info"
-          python3 -m json.tool "$RELEASE_INFO_FILE"
+          python3 -m json.tool /tmp/release_info.json
          echo "::endgroup::"
-          release_tag=$(jq -r '.release_tag' "$RELEASE_INFO_FILE")
-          commit_sha=$(jq -r '.commit_sha' "$RELEASE_INFO_FILE")
+          release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
+          commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
+          is_latest=$(jq -r '.latest' /tmp/release_info.json)
          echo "Release Tag: $release_tag"
          echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
          echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
+          if [ "$is_latest" == "true" ]; then
+              echo "DOCKER_TAG_TYPE=release-latest" >> "$GITHUB_ENV"
+          else
+              echo "DOCKER_TAG_TYPE=release" >> "$GITHUB_ENV"
+          fi
       - name: Download All Release Artifacts
         if: ${{ inputs.type == 'patch' }}
+        shell: bash
         run: |
-          python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }}
       - name: Push Git Tag for the Release
+        if: ${{ ! inputs.only-repo }}
+        shell: bash
         run: |
-          python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }}
       - name: Push New Release Branch
-        if: ${{ inputs.type == 'new' }}
+        if: ${{ inputs.type == 'new' && ! inputs.only-repo }}
+        shell: bash
         run: |
-          python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }}
       - name: Bump CH Version and Update Contributors' List
+        if: ${{ ! inputs.only-repo }}
+        shell: bash
         run: |
-          python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }}
-      - name: Checkout master
-        run: |
-          git checkout master
       - name: Bump Docker versions, Changelog, Security
-        if: ${{ inputs.type == 'patch' }}
+        if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
+        shell: bash
         run: |
-          [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
+          python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
+
+          git checkout master  # in case WF started from feature branch
          echo "List versions"
          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
          echo "Update docker version"
@@ -86,16 +119,18 @@ jobs:
          echo "Generate ChangeLog"
          export CI=1
          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
-            --volume=".:/ClickHouse" clickhouse/style-test \
-            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
-            --gh-user-or-token="$GH_TOKEN" --jobs=5 \
-            --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
+            --volume=".:/wd" --workdir="/wd" \
+            clickhouse/style-test \
+            ./tests/ci/changelog.py -v --debug-helpers \
+            --gh-user-or-token ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} \
+            --jobs=5 \
+            --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
          git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
          echo "Generate Security"
          python3 ./utils/security-generator/generate_security.py > SECURITY.md
          git diff HEAD
       - name: Create ChangeLog PR
-        if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
+        if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo }}
         uses: peter-evans/create-pull-request@v6
         with:
           author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
@@ -103,6 +138,7 @@ jobs:
           committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
           commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
           branch: auto/${{ env.RELEASE_TAG }}
+          base: master
           assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
           delete-branch: true
           title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
@@ -111,56 +147,78 @@ jobs:
             Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
             ### Changelog category (leave one):
             - Not for changelog (changelog entry is not required)
-      - name: Reset changes if Dry-run
-        if: ${{ inputs.dry-run }}
+      - name: Complete previous steps and Restore git state
+        if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
+        shell: bash
         run: |
          git reset --hard HEAD
-      - name: Checkout back to GITHUB_REF
-        run: |
          git checkout "$GITHUB_REF_NAME"
+          python3 ./tests/ci/create_release.py --set-progress-completed
       - name: Create GH Release
-        if: ${{ inputs.type == 'patch' }}
+        if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
+        shell: bash
         run: |
-          python3 ./tests/ci/create_release.py --create-gh-release \
-              --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }}
       - name: Export TGZ Packages
         if: ${{ inputs.type == 'patch' }}
+        shell: bash
         run: |
-          python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
       - name: Test TGZ Packages
         if: ${{ inputs.type == 'patch' }}
+        shell: bash
         run: |
-          python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
       - name: Export RPM Packages
         if: ${{ inputs.type == 'patch' }}
+        shell: bash
         run: |
-          python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
       - name: Test RPM Packages
         if: ${{ inputs.type == 'patch' }}
+        shell: bash
         run: |
-          python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
       - name: Export Debian Packages
         if: ${{ inputs.type == 'patch' }}
+        shell: bash
         run: |
-          python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
       - name: Test Debian Packages
         if: ${{ inputs.type == 'patch' }}
+        shell: bash
         run: |
-          python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
+          python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
       - name: Docker clickhouse/clickhouse-server building
         if: ${{ inputs.type == 'patch' }}
+        shell: bash
         run: |
          cd "./tests/ci"
+          python3 ./create_release.py --set-progress-started --progress "docker server release"
          export CHECK_NAME="Docker server image"
-          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+          python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+          python3 ./create_release.py --set-progress-completed
       - name: Docker clickhouse/clickhouse-keeper building
         if: ${{ inputs.type == 'patch' }}
+        shell: bash
         run: |
          cd "./tests/ci"
+          python3 ./create_release.py --set-progress-started --progress "docker keeper release"
          export CHECK_NAME="Docker keeper image"
-          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+          python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+          python3 ./create_release.py --set-progress-completed
-      - name: Post Slack Message
-        if: always()
-        run: |
-          echo Slack Message
+      - name: Update release info. Merge created PRs
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --merge-prs ${{ inputs.dry-run == true && '--dry-run' || '' }}
+      - name: Set current Release progress to Completed with OK
+        shell: bash
+        run: |
+          # dummy stage to finalize release info with "progress: completed; status: OK"
+          python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
+          python3 ./tests/ci/create_release.py --set-progress-completed
+      - name: Post Slack Message
+        if: ${{ !cancelled() }}
+        shell: bash
+        run: |
+          python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run == true && '--dry-run' || '' }}
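Technical note: the new `workflow_call` trigger makes create_release.yml a reusable workflow, which is what allows auto_releases.yml above to run it as the `Release_0` job. A minimal sketch of such a call with the inputs declared here (placeholder ref value):

    Release_0:
      uses: ./.github/workflows/create_release.yml
      with:
        ref: 0123abcd   # commit sha or branch to release from (placeholder)
        type: patch
        dry-run: true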

.github/workflows/jepsen.yml (vendored, 70 changes)
@@ -9,19 +9,65 @@ on: # yamllint disable-line rule:truthy
     - cron: '0 */6 * * *'
   workflow_dispatch:
 jobs:
+  RunConfig:
+    runs-on: [self-hosted, style-checker-aarch64]
+    outputs:
+      data: ${{ steps.runconfig.outputs.CI_DATA }}
+    steps:
+      - name: DebugInfo
+        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true  # to ensure correct digests
+          fetch-depth: 0  # to get version
+          filter: tree:0
+      - name: PrepareRunConfig
+        id: runconfig
+        run: |
+          echo "::group::configure CI run"
+          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow "$GITHUB_WORKFLOW" --outfile ${{ runner.temp }}/ci_run_data.json
+          echo "::endgroup::"
+
+          echo "::group::CI run configure results"
+          python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+          echo "::endgroup::"
+          {
+            echo 'CI_DATA<<EOF'
+            cat ${{ runner.temp }}/ci_run_data.json
+            echo 'EOF'
+          } >> "$GITHUB_OUTPUT"
   KeeperJepsenRelease:
-    uses: ./.github/workflows/reusable_simple_job.yml
+    needs: [RunConfig]
+    uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Jepsen keeper check
-      runner_type: style-checker
-      report_required: true
+      test_name: ClickHouse Keeper Jepsen
+      runner_type: style-checker-aarch64
+      data: ${{ needs.RunConfig.outputs.data }}
       run_command: |
         python3 jepsen_check.py keeper
-  # ServerJepsenRelease:
-  #   uses: ./.github/workflows/reusable_simple_job.yml
-  #   with:
-  #     test_name: Jepsen server check
-  #     runner_type: style-checker
-  #     run_command: |
-  #       cd "$REPO_COPY/tests/ci"
-  #       python3 jepsen_check.py server
+  ServerJepsenRelease:
+    if: false  # skip for server
+    needs: [RunConfig]
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: ClickHouse Server Jepsen
+      runner_type: style-checker-aarch64
+      data: ${{ needs.RunConfig.outputs.data }}
+      run_command: |
+        python3 jepsen_check.py server
+  CheckWorkflow:
+    if: ${{ !cancelled() }}
+    needs: [RunConfig, ServerJepsenRelease, KeeperJepsenRelease]
+    runs-on: [self-hosted, style-checker-aarch64]
+    steps:
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+      - name: Check Workflow results
+        if: ${{ !cancelled() }}
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
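Technical note: `filter: tree:0` requests a treeless partial clone, so combining it with `fetch-depth: 0` (full history, needed to derive the version) stays cheap: all commits arrive up front while trees and blobs are fetched on demand. A sketch of the plain-git equivalent of what the checkout action requests here:

    git clone --filter=tree:0 https://github.com/ClickHouse/ClickHouse.git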

.github/workflows/master.yml (vendored, 56 changes)
@@ -93,21 +93,21 @@ jobs:
     with:
       stage: Builds_2
       data: ${{ needs.RunConfig.outputs.data }}
-  Tests_2:
+  Tests_2_ww:
     needs: [RunConfig, Builds_2]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }}
+    uses: ./.github/workflows/reusable_test_stage.yml
+    with:
+      stage: Tests_2_ww
+      data: ${{ needs.RunConfig.outputs.data }}
+  Tests_2:
+    # Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there.
+    needs: [RunConfig, Builds_1]
     if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
     uses: ./.github/workflows/reusable_test_stage.yml
     with:
       stage: Tests_2
       data: ${{ needs.RunConfig.outputs.data }}
-  Tests_3:
-    # Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there.
-    needs: [RunConfig, Builds_1]
-    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
-    uses: ./.github/workflows/reusable_test_stage.yml
-    with:
-      stage: Tests_3
-      data: ${{ needs.RunConfig.outputs.data }}
 
 ################################# Reports #################################
 # Reports should run even if Builds_1/2 fail - run them separately, not in Tests_1/2/3
@@ -121,37 +121,9 @@ jobs:
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
 
-  MarkReleaseReady:
-    if: ${{ !failure() && !cancelled() }}
-    needs: [RunConfig, Builds_1, Builds_2]
-    runs-on: [self-hosted, style-checker-aarch64]
-    steps:
-      - name: Debug
-        run: |
-          echo need with different filters
-          cat << 'EOF'
-          ${{ toJSON(needs) }}
-          ${{ toJSON(needs.*.result) }}
-          no failures ${{ !contains(needs.*.result, 'failure') }}
-          no skips ${{ !contains(needs.*.result, 'skipped') }}
-          no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-          EOF
-      - name: Not ready
-        # fail the job to be able to restart it
-        if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
-        run: exit 1
-      - name: Check out repository code
-        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-        uses: ClickHouse/checkout@v1
-      - name: Mark Commit Release Ready
-        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 mark_release_ready.py
-
   FinishCheck:
     if: ${{ !cancelled() }}
-    needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
+    needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -160,3 +132,11 @@ jobs:
         run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        if: ${{ !cancelled() }}
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
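Technical note: every stage is gated on a plan computed once by RunConfig; the JSON output is parsed inline with `fromJson` and the stage name looked up in `stages_data.stages_to_do`, so deselected stages never occupy a runner. The recurring pattern in isolation (hypothetical stage name):

    SomeStage:
      needs: [RunConfig]
      if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'SomeStage') }}
      uses: ./.github/workflows/reusable_test_stage.yml
      with:
        stage: SomeStage
        data: ${{ needs.RunConfig.outputs.data }}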

.github/workflows/merge_queue.yml (vendored, 17 changes)
@@ -93,7 +93,7 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}
 
   CheckReadyForMerge:
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
+    if: ${{ !cancelled() }}
     # Test_2 or Test_3 must not have jobs required for Mergeable check
     needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
     runs-on: [self-hosted, style-checker-aarch64]
@@ -101,6 +101,19 @@ jobs:
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
       - name: Check and set merge status
+        if: ${{ needs.StyleCheck.result == 'success' }}
         run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+          python3 merge_pr.py --set-ci-status
+      - name: Check Workflow results
+        if: ${{ !cancelled() }}
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
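Technical note: the heredoc delimiter is quoted (`<< 'EOF'`) on purpose. The runner substitutes `${{ toJson(needs) }}` before bash starts, and the quoted delimiter then keeps any `$` characters inside the resulting JSON from being expanded by the shell. A minimal sketch of the difference:

    cat << 'EOF'    # quoted delimiter: $HOME stays literal
    {"path": "$HOME"}
    EOF
    cat << EOF      # unquoted: bash expands $HOME
    {"path": "$HOME"}
    EOF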

.github/workflows/nightly.yml (vendored, 15 changes)
@@ -44,3 +44,18 @@ jobs:
     with:
       data: "${{ needs.RunConfig.outputs.data }}"
       set_latest: true
+  CheckWorkflow:
+    if: ${{ !cancelled() }}
+    needs: [RunConfig, BuildDockers]
+    runs-on: [self-hosted, style-checker-aarch64]
+    steps:
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+      - name: Check Workflow results
+        if: ${{ !cancelled() }}
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status

.github/workflows/pull_request.yml (vendored, 45 changes)
@@ -123,27 +123,32 @@ jobs:
       stage: Builds_2
       data: ${{ needs.RunConfig.outputs.data }}
   # stage for running non-required checks without being blocked by required checks (Test_1) if corresponding settings is selected
-  Tests_2:
+  Tests_2_ww:
     needs: [RunConfig, Builds_1]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }}
+    uses: ./.github/workflows/reusable_test_stage.yml
+    with:
+      stage: Tests_2_ww
+      data: ${{ needs.RunConfig.outputs.data }}
+  Tests_2:
+    needs: [RunConfig, Builds_1, Tests_1]
     if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
     uses: ./.github/workflows/reusable_test_stage.yml
     with:
       stage: Tests_2
       data: ${{ needs.RunConfig.outputs.data }}
-  Tests_3:
-    needs: [RunConfig, Builds_1, Tests_1]
-    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
-    uses: ./.github/workflows/reusable_test_stage.yml
-    with:
-      stage: Tests_3
-      data: ${{ needs.RunConfig.outputs.data }}
 
 ################################# Reports #################################
 # Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3)
   Builds_Report:
     # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
-    needs: [RunConfig, StyleCheck, Builds_1, Builds_2]
+    if: ${{ !cancelled()
+      && needs.RunConfig.result == 'success'
+      && needs.StyleCheck.result != 'failure'
+      && needs.FastTest.result != 'failure'
+      && needs.BuildDockers.result != 'failure'
+      && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2]
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Builds
@@ -151,9 +156,10 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}
 
   CheckReadyForMerge:
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
-    # Test_2 or Test_3 must not have jobs required for Mergeable check
-    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1]
+    if: ${{ !cancelled() }}
+    # Test_2 or Test_3 do not have the jobs required for Mergeable check,
+    # however, set them as "needs" to get all checks results before the automatic merge occurs.
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -161,15 +167,24 @@ jobs:
         with:
           filter: tree:0
       - name: Check and set merge status
+        if: ${{ needs.StyleCheck.result == 'success' }}
         run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 merge_pr.py --set-ci-status
+      - name: Check Workflow results
+        uses: ./.github/actions/check_workflow
+        with:
+          needs: ${{ toJson(needs) }}
 
 ################################# Stage Final #################################
 #
   FinishCheck:
     if: ${{ !failure() && !cancelled() }}
-    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code

.github/workflows/release.yml (vendored, deleted, 69 lines)
@@ -1,69 +0,0 @@
-name: PublishedReleaseCI
-# - Gets artifacts from S3
-# - Sends it to JFROG Artifactory
-# - Adds them to the release assets
-
-on: # yamllint disable-line rule:truthy
-  release:
-    types:
-      - published
-  workflow_dispatch:
-    inputs:
-      tag:
-        description: 'Release tag'
-        required: true
-        type: string
-
-jobs:
-  ReleasePublish:
-    runs-on: [self-hosted, style-checker]
-    steps:
-      - name: Set tag from input
-        if: github.event_name == 'workflow_dispatch'
-        run: |
-          echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
-      - name: Set tag from REF
-        if: github.event_name == 'release'
-        run: |
-          echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
-      - name: Deploy packages and assets
-        run: |
-          curl --silent --data '' --no-buffer \
-              '${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true'
-  ############################################################################################
-  ##################################### Docker images #######################################
-  ############################################################################################
-  DockerServerImages:
-    runs-on: [self-hosted, style-checker]
-    steps:
-      - name: Set tag from input
-        if: github.event_name == 'workflow_dispatch'
-        run: |
-          echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
-      - name: Set tag from REF
-        if: github.event_name == 'release'
-        run: |
-          echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
-          fetch-depth: 0  # otherwise we will have no version info
-          filter: tree:0
-          ref: ${{ env.GITHUB_TAG }}
-      - name: Check docker clickhouse/clickhouse-server building
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          export CHECK_NAME="Docker server image"
-          python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
-      - name: Check docker clickhouse/clickhouse-keeper building
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          export CHECK_NAME="Docker keeper image"
-          python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
-      - name: Cleanup
-        if: always()
-        run: |
-          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-          sudo rm -fr "$TEMP_PATH"
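Technical note: this deleted workflow (and tags_stable.yml below) derived the tag with the POSIX parameter expansion `${GITHUB_REF#refs/tags/}`, which strips the shortest leading match of the pattern. In isolation, with an illustrative tag value:

    GITHUB_REF=refs/tags/v24.7.1.2915-stable
    echo "${GITHUB_REF#refs/tags/}"   # prints v24.7.1.2915-stable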

.github/workflows/release_branches.yml (vendored, 17 changes)
@@ -441,8 +441,9 @@ jobs:
       runner_type: stress-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
     needs:
+      - RunConfig
       - DockerServerImage
       - DockerKeeperImage
       - Builds_Report
@@ -478,9 +479,15 @@ jobs:
         with:
           clear-repository: true
       - name: Finish label
+        if: ${{ !failure() }}
         run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          # update mergeable check
-          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          # update overall ci report
-          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+          python3 ./tests/ci/finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        if: ${{ !cancelled() }}
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status

.github/workflows/tags_stable.yml (vendored, deleted, 74 lines)
@@ -1,74 +0,0 @@
-name: TagsStableWorkflow
-# - Gets artifacts from S3
-# - Sends it to JFROG Artifactory
-# - Adds them to the release assets
-
-env:
-  # Force the stdout and stderr streams to be unbuffered
-  PYTHONUNBUFFERED: 1
-
-on: # yamllint disable-line rule:truthy
-  push:
-    tags:
-      - 'v*-prestable'
-      - 'v*-stable'
-      - 'v*-lts'
-  workflow_dispatch:
-    inputs:
-      tag:
-        description: 'Test tag'
-        required: true
-        type: string
-
-jobs:
-  UpdateVersions:
-    runs-on: [self-hosted, style-checker]
-    steps:
-      - name: Set test tag
-        if: github.event_name == 'workflow_dispatch'
-        run: |
-          echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
-      - name: Get tag name
-        if: github.event_name != 'workflow_dispatch'
-        run: |
-          echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          ref: master
-          fetch-depth: 0
-          filter: tree:0
-      - name: Update versions, docker version, changelog, security
-        env:
-          GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
-        run: |
-          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
-          ./utils/list-versions/update-docker-version.sh
-          GID=$(id -g "${UID}")
-          # --network=host and CI=1 are required for the S3 access from a container
-          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
-            --volume="${GITHUB_WORKSPACE}:/ClickHouse" clickhouse/style-test \
-            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
-            --gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
-            --output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
-          git add "./docs/changelogs/${GITHUB_TAG}.md"
-          python3 ./utils/security-generator/generate_security.py > SECURITY.md
-          git diff HEAD
-      - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v6
-        with:
-          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
-          token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
-          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
-          commit-message: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
-          branch: auto/${{ env.GITHUB_TAG }}
-          assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
-          delete-branch: true
-          title: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
-          labels: do not test
-          body: |
-            Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
-
-            ### Changelog category (leave one):
-            - Not for changelog (changelog entry is not required)

.gitmodules (vendored, 11 changes)
@@ -230,9 +230,6 @@
 [submodule "contrib/minizip-ng"]
     path = contrib/minizip-ng
     url = https://github.com/zlib-ng/minizip-ng
-[submodule "contrib/annoy"]
-    path = contrib/annoy
-    url = https://github.com/ClickHouse/annoy
 [submodule "contrib/qpl"]
     path = contrib/qpl
     url = https://github.com/intel/qpl
@@ -341,16 +338,13 @@
     url = https://github.com/graphitemaster/incbin.git
 [submodule "contrib/usearch"]
     path = contrib/usearch
-    url = https://github.com/unum-cloud/usearch.git
+    url = https://github.com/ClickHouse/usearch.git
 [submodule "contrib/SimSIMD"]
     path = contrib/SimSIMD
     url = https://github.com/ashvardanian/SimSIMD.git
 [submodule "contrib/FP16"]
     path = contrib/FP16
     url = https://github.com/Maratyszcza/FP16.git
-[submodule "contrib/robin-map"]
-    path = contrib/robin-map
-    url = https://github.com/Tessil/robin-map.git
 [submodule "contrib/aklomp-base64"]
     path = contrib/aklomp-base64
     url = https://github.com/aklomp/base64.git
@@ -372,3 +366,6 @@
 [submodule "contrib/double-conversion"]
     path = contrib/double-conversion
     url = https://github.com/ClickHouse/double-conversion.git
+[submodule "contrib/numactl"]
+    path = contrib/numactl
+    url = https://github.com/ClickHouse/numactl.git
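Technical note: editing a url in .gitmodules (usearch moves from unum-cloud to the ClickHouse fork here) does not retarget existing clones by itself; each clone has to copy the new URL into its local config and re-fetch. A sketch of what a contributor would run:

    git submodule sync contrib/usearch            # propagate the new URL to .git/config
    git submodule update --init contrib/usearch   # fetch from the new remote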

.yamllint (12 changes)
@@ -5,12 +5,12 @@ rules:
   indentation:
     level: warning
     indent-sequences: consistent
-  line-length:
-    # there are:
-    #  - bash -c "", so this is OK
-    #  - yaml in tests
-    max: 1000
-    level: warning
   comments:
     min-spaces-from-content: 1
   document-start: disable
+  colons: disable
+  indentation: disable
+  line-length: disable
+  trailing-spaces: disable
+  truthy: disable
+  new-line-at-end-of-file: disable
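Technical note: instead of tuning the line-length rule (its old max: 1000 escape hatch existed, per its own comments, because of long `bash -c ""` strings and YAML embedded in tests), the new config disables it and several other stylistic rules outright. To reproduce the check locally, assuming yamllint is installed:

    yamllint -c .yamllint .github/workflows/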

CHANGELOG.md (168 changes)
@ -1,4 +1,5 @@
|
|||||||
### Table of Contents
|
### Table of Contents
|
||||||
|
**[ClickHouse release v24.7, 2024-07-30](#247)**<br/>
|
||||||
**[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
|
**[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
|
||||||
**[ClickHouse release v24.5, 2024-05-30](#245)**<br/>
|
**[ClickHouse release v24.5, 2024-05-30](#245)**<br/>
|
||||||
**[ClickHouse release v24.4, 2024-04-30](#244)**<br/>
|
**[ClickHouse release v24.4, 2024-04-30](#244)**<br/>
|
||||||
@ -9,6 +10,173 @@

 # 2024 Changelog

+### <a id="247"></a> ClickHouse release 24.7, 2024-07-30
+
+#### Backward Incompatible Change
+* Forbid `CREATE MATERIALIZED VIEW ... ENGINE Replicated*MergeTree POPULATE AS SELECT ...` with Replicated databases. [#63963](https://github.com/ClickHouse/ClickHouse/pull/63963) ([vdimir](https://github.com/vdimir)).
+* `clickhouse-keeper-client` will only accept paths in string literals, such as `ls '/hello/world'`, not bare strings such as `ls /hello/world`. [#65494](https://github.com/ClickHouse/ClickHouse/pull/65494) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)).
+* Remove the `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Function `tuple` will now try to construct named tuples in queries (controlled by `enable_named_columns_in_function_tuple`). Introduce the function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
+* Change how deduplication for Materialized Views works. Fixed many cases: on the destination table, data split into two or more blocks was considered a duplicate when those blocks were inserted in parallel; on the MV destination table, equal blocks were deduplicated, which happens when an MV often produces equal data for different input data due to aggregation; on the MV destination table, equal blocks coming from different MVs were deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
+
+#### New Feature
+* Add `ASOF JOIN` support for the `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
+* Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)).
+* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns the date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)).
+* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)).
+* Support `accept_invalid_certificate` in the client's config to allow the client to connect over secure TCP to a server running with a self-signed certificate. It can be used as a shorthand for the corresponding `openSSL` client settings `verificationMode=none` + `invalidCertificateHandler.name=AcceptCertificateHandler`. [#65238](https://github.com/ClickHouse/ClickHouse/pull/65238) ([peacewalker122](https://github.com/peacewalker122)).
+* Add `system.error_log`, which contains the history of error values from the table `system.errors`, periodically flushed to disk. [#65381](https://github.com/ClickHouse/ClickHouse/pull/65381) ([Pablo Marcos](https://github.com/pamarcos)).
+* Add the aggregate function `groupConcat`. It is about the same as `arrayStringConcat(groupArray(column), ',')` and can receive two parameters: a string delimiter and the number of elements to be processed. [#65451](https://github.com/ClickHouse/ClickHouse/pull/65451) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
+* Add AzureQueue storage. [#65458](https://github.com/ClickHouse/ClickHouse/pull/65458) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add a new setting to disable/enable writing the page index into parquet files. [#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)).
+* Introduce the `logger.console_log_level` server config to control the log level to the console (if enabled). [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)).
+* Automatically append a wildcard `*` to the end of a directory path with the table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)).
+* Add a `--memory-usage` option to the client in non-interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)).
+* Make an interactive client for clickhouse-disks, add a local disk from the local directory. [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)).
+* When a lightweight delete happens on a table with projection(s), users can choose to either throw an exception (by default) or drop the projection. [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)).
+* Add system tables with the main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)).
+
+#### Experimental Feature
+* Change the binary serialization of the `Variant` data type: add a `compact` mode to avoid writing the same discriminator multiple times for granules with a single variant or with only NULL values. Add the MergeTree setting `use_compact_variant_discriminators_serialization`, enabled by default. Note that the Variant type is still experimental, so a backward-incompatible change in serialization is OK. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)).
+* Support on-disk backend storage for clickhouse-keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)).
+* Refactor the JSONExtract functions, support more types including the experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)).
+* Support the null map subcolumn for `Variant` and `Dynamic` subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix reading `Dynamic` subcolumns from an altered `Memory` table. Previously, if the `max_types` parameter of a Dynamic type was changed in a Memory table via ALTER, further subcolumn reads could return a wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add support for `cluster_for_parallel_replicas` when using custom-key parallel replicas. It allows you to use parallel replicas with a custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### Performance Improvement
+* Replace the int-to-string algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)).
+* Sizes of hash tables created by join (the `parallel_hash` algorithm) are collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)).
+* Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using buffering. It is controlled by the setting `read_in_order_use_buffering` (enabled by default) and can increase the memory usage of the query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)).
+* Improve performance of loading `plain_rewritable` metadata. [#65634](https://github.com/ClickHouse/ClickHouse/pull/65634) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Attaching tables on read-only disks will use fewer resources by not loading outdated parts. [#65635](https://github.com/ClickHouse/ClickHouse/pull/65635) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Support minmax hyperrectangles for Set indices. [#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)).
+* Unload the primary index of outdated parts to reduce total memory usage. [#65852](https://github.com/ClickHouse/ClickHouse/pull/65852) ([Anton Popov](https://github.com/CurtizJ)).
+* Functions `replaceRegexpAll` and `replaceRegexpOne` are now significantly faster if the pattern is trivial, i.e. contains no metacharacters, pattern classes, flags, grouping characters etc. (Thanks to Taiyang Li). [#66185](https://github.com/ClickHouse/ClickHouse/pull/66185) ([Robert Schulze](https://github.com/rschu1ze)).
+* s3 requests: Reduce the retry time for queries and increase the retry count for backups: 8.5 minutes and 100 retries for queries, 1.2 hours and 1000 retries for backup restore. [#65232](https://github.com/ClickHouse/ClickHouse/pull/65232) ([Sema Checherinda](https://github.com/CheSema)).
+* Support the query plan LIMIT optimization. Support LIMIT pushdown for the PostgreSQL storage and table function. [#65454](https://github.com/ClickHouse/ClickHouse/pull/65454) ([Maksim Kita](https://github.com/kitaisreal)).
+* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* DatabaseCatalog drops tables faster by using up to `database_catalog_drop_table_concurrency` threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)).
+
+#### Improvement
+* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* The setting `optimize_trivial_insert_select` is disabled by default. In most cases, this should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it again or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Print a stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)).
+* Change how deduplication for Materialized Views works. Fixed many cases: on the destination table, data split into two or more blocks was considered a duplicate when those blocks were inserted in parallel; on the MV destination table, equal blocks were deduplicated, which happens when an MV often produces equal data for different input data due to aggregation; on the MV destination table, equal blocks coming from different MVs were deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
+* Support reading partitioned DeltaLake data. Infer the DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* In composable protocols, the TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)).
+* Added the profile event `SelectQueriesWithPrimaryKeyUsage`, which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)).
+* `StorageS3Queue`-related fixes and improvements. Deduce a default value of `s3queue_processing_threads_num` according to the number of physical CPU cores on the server (instead of the previous default of 1). Set the default value of `s3queue_loading_retries` to 10. Fix a possible vague "Uncaught exception" in the exception column of `system.s3queue`. Do not increment the retry count on a `MEMORY_LIMIT_EXCEEDED` exception. Move the files commit to a stage after the insertion into the table has fully finished, to avoid files being committed while not inserted. Add settings `s3queue_max_processed_files_before_commit`, `s3queue_max_processed_rows_before_commit`, `s3queue_max_processed_bytes_before_commit`, `s3queue_max_processing_time_sec_before_commit` to better control commit and flush time. [#65046](https://github.com/ClickHouse/ClickHouse/pull/65046) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support aliases in the parametrized view function (only the new analyzer). [#65190](https://github.com/ClickHouse/ClickHouse/pull/65190) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Mask the account key in logs in azureBlobStorage. [#65273](https://github.com/ClickHouse/ClickHouse/pull/65273) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
+* Partition pruning for `IN` predicates when the filter expression is a part of the `PARTITION BY` expression. [#65335](https://github.com/ClickHouse/ClickHouse/pull/65335) ([Eduard Karacharov](https://github.com/korowa)).
+* `arrayMin`/`arrayMax` are now applicable to all data types that are comparable. [#65455](https://github.com/ClickHouse/ClickHouse/pull/65455) ([pn](https://github.com/chloro-pn)).
+* Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)).
+* Do not create format settings for each row when serializing chunks for insertion into an EmbeddedRocksDB table. [#65474](https://github.com/ClickHouse/ClickHouse/pull/65474) ([Duc Canh Le](https://github.com/canhld94)).
+* Reduce the `clickhouse-local` prompt to just `:)`. `getFQDNOrHostName()` takes too long on macOS, and we don't want a hostname in the prompt for `clickhouse-local` anyway. [#65510](https://github.com/ClickHouse/ClickHouse/pull/65510) ([Konstantin Bogdanov](https://github.com/thevar1able)).
+* Avoid printing a message from jemalloc about per-CPU arenas on low-end virtual machines. [#65532](https://github.com/ClickHouse/ClickHouse/pull/65532) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Disable the filesystem cache background download by default. It will be enabled back when we fix the issue with a possible "Memory limit exceeded", because memory deallocation is done outside of the query context (while the buffer is allocated inside the query context) if we use background download threads. Plus we need to add a separate setting to define the max size to download for background workers (currently it is limited by `max_file_segment_size`, which might be too big). [#65534](https://github.com/ClickHouse/ClickHouse/pull/65534) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add a new config option `<config_reload_interval_ms>`, which allows specifying how often ClickHouse will reload its config. [#65545](https://github.com/ClickHouse/ClickHouse/pull/65545) ([alesapin](https://github.com/alesapin)).
+* Implement binary encoding for ClickHouse data types and add its specification in the docs. Use it in the Dynamic binary serialization, and allow using it in the RowBinaryWithNamesAndTypes and Native formats under settings. [#65546](https://github.com/ClickHouse/ClickHouse/pull/65546) ([Kruglov Pavel](https://github.com/Avogar)).
+* Server settings `compiled_expression_cache_size` and `compiled_expression_cache_elements_size` are now shown in `system.server_settings`. [#65584](https://github.com/ClickHouse/ClickHouse/pull/65584) ([Robert Schulze](https://github.com/rschu1ze)).
+* Add support for user identification based on the x509 SubjectAltName extension. [#65626](https://github.com/ClickHouse/ClickHouse/pull/65626) ([Anton Kozlov](https://github.com/tonickkozlov)).
+* `clickhouse-local` will respect `max_server_memory_usage` and `max_server_memory_usage_to_ram_ratio` from the configuration file. It will also set the max memory usage to 90% of the system memory by default, like `clickhouse-server` does. [#65697](https://github.com/ClickHouse/ClickHouse/pull/65697) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Add a script to back up your files to ClickHouse. [#65699](https://github.com/ClickHouse/ClickHouse/pull/65699) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* The PostgreSQL source now supports query cancellation. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)).
+* Make `allow_experimental_analyzer` be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed-version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Respect the cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
+* Allow using the `concat` function with empty arguments: `:) select concat();`. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([李扬](https://github.com/taiyang-li)).
+* Allow controlling named collections in `clickhouse-local`. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Improve Azure-related profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)).
+* Support reading ORC files by the writer's time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)).
+* Add settings to control connections to PostgreSQL. The setting `postgresql_connection_attempt_timeout` specifies the value passed to the `connect_timeout` parameter of the connection URL. The setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL end-point. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)).
+* Reduce the inaccuracy of `input_wait_elapsed_us`/`elapsed_us` in `system.processors_profile_log`. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)).
+* Improve ProfileEvents for the filesystem cache. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)).
+* Add settings to ignore the `ON CLUSTER` clause in queries for named collection management with replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Function `generateSnowflakeID` now allows specifying a machine ID as a parameter to prevent collisions in large clusters. [#66374](https://github.com/ClickHouse/ClickHouse/pull/66374) ([ZAWA_ll](https://github.com/Zawa-ll)).
+* Disable suspending on `Ctrl+Z` in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Add an option for validating the primary key type in Dictionaries. Without this option, for simple layouts, any column type will be implicitly converted to UInt64. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously, such cyclic dependencies could lead to a deadlock during server startup. Also fix some bugs in dependencies creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix unexpected sizes of `LowCardinality` columns in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix a crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix the `VALID UNTIL` clause in the user definition resetting after a restart. [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix the remaining time column in `SHOW MERGES`. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* `Query was cancelled` might have been printed twice in clickhouse-client. This behaviour is fixed. [#66005](https://github.com/ClickHouse/ClickHouse/pull/66005) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fixed a crash while using `MaterializedMySQL` (which is an unsupported, experimental feature) with a TABLE OVERRIDE that maps a MySQL NULL field into a ClickHouse not-NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)).
+* Fix a logical error when a `PREWHERE` expression reads no columns and the table has no adaptive index granularity (very old table). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)).
+* Fix a bug with the cancellation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)).
+* Fix filling parts columns from metadata (when columns.txt does not exist). [#64757](https://github.com/ClickHouse/ClickHouse/pull/64757) ([Azat Khuzhin](https://github.com/azat)).
+* Fix a crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
+* Fix a crash on destroying AccessControl: add an explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Eliminate injective functions in arguments of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
+* Fix an unexpected projection name for queries with CTE. [#65267](https://github.com/ClickHouse/ClickHouse/pull/65267) ([wudidapaopao](https://github.com/wudidapaopao)).
+* Require the `dictGet` privilege when accessing dictionaries via a direct query or the `Dictionary` table engine. [#65359](https://github.com/ClickHouse/ClickHouse/pull/65359) ([Joe Lynch](https://github.com/joelynch)).
+* Fix user-specific S3 auth with incremental backups. [#65481](https://github.com/ClickHouse/ClickHouse/pull/65481) ([Antonio Andelic](https://github.com/antonio2368)).
+* Disable the `non-intersecting-parts` optimization for queries with `FINAL` when the `read-in-order` optimization is enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix getting the exception `Index out of bound for blob metadata` in case all files from a list batch were filtered out. [#65523](https://github.com/ClickHouse/ClickHouse/pull/65523) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix NOT_FOUND_COLUMN_IN_BLOCK for a deduplicate merge of a projection. [#65573](https://github.com/ClickHouse/ClickHouse/pull/65573) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Fixed a bug in MergeJoin. A column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
+* Fixed a bug where the compatibility level '23.4' was not properly applied. [#65737](https://github.com/ClickHouse/ClickHouse/pull/65737) ([cw5121](https://github.com/cw5121)).
+* Fix odbc tables with nullable fields. [#65738](https://github.com/ClickHouse/ClickHouse/pull/65738) ([Rodolphe Dugé de Bernonville](https://github.com/RodolpheDuge)).
+* Fix a data race in `TCPHandler`, which could happen on a fatal error. [#65744](https://github.com/ClickHouse/ClickHouse/pull/65744) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix invalid exceptions in function `parseDateTime` with the `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
+* For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, the `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
+* Fix a bug in short-circuit logic when the old analyzer and dictGetOrDefault are used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
+* Fix a bug that led EmbeddedRocksDB with TTL to write corrupted SST files. [#65816](https://github.com/ClickHouse/ClickHouse/pull/65816) ([Duc Canh Le](https://github.com/canhld94)).
+* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out-of-bounds. [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)).
+* The setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)).
+* Better handling of join conditions involving `IS NULL` checks (for example, `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`); fix an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
+* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions. [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
+* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)).
+* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)).
+* Fix possible issues with MySQL client protocol TLS connections. [#65938](https://github.com/ClickHouse/ClickHouse/pull/65938) ([Azat Khuzhin](https://github.com/azat)).
+* Fix handling of `SSL_ERROR_WANT_READ`/`SSL_ERROR_WANT_WRITE` with a zero timeout. [#65941](https://github.com/ClickHouse/ClickHouse/pull/65941) ([Azat Khuzhin](https://github.com/azat)).
+* Add the missing settings `input_format_csv_skip_first_lines`/`input_format_tsv_skip_first_lines`/`input_format_csv_try_infer_numbers_from_strings`/`input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache, because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
+* The column `_size` in the s3 engine and the s3 table function denotes the size of a file inside the archive, not the size of the archive itself. [#65993](https://github.com/ClickHouse/ClickHouse/pull/65993) ([Daniil Ivanik](https://github.com/divanik)).
+* Fix resolving dynamic subcolumns in the analyzer; avoid reading the whole column on dynamic subcolumn reading. [#66004](https://github.com/ClickHouse/ClickHouse/pull/66004) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix config merging for from_env with replace overrides. [#66034](https://github.com/ClickHouse/ClickHouse/pull/66034) ([Azat Khuzhin](https://github.com/azat)).
+* Fix a possible hang in `GRPCServer` during shutdown. [#66061](https://github.com/ClickHouse/ClickHouse/pull/66061) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fixed several cases in function `has` with non-constant `LowCardinality` arguments. [#66088](https://github.com/ClickHouse/ClickHouse/pull/66088) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix for `groupArrayIntersect`. It had incorrect behavior in the `merge()` function. Also fixed behavior in `deserialise()` for numeric and general data. [#66103](https://github.com/ClickHouse/ClickHouse/pull/66103) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
+* Fixed a buffer overflow bug in the `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
+* Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if the optimization merges two filter expressions and does not apply a short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fixed the issue when the server failed to parse Avro files with negative block size arrays encoded, which is now allowed by the Avro specification. [#66130](https://github.com/ClickHouse/ClickHouse/pull/66130) ([Serge Klochkov](https://github.com/slvrtrn)).
+* Fixed a bug in the ZooKeeper client: a session could get stuck in an unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to the "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix an issue in SumIfToCountIfVisitor with signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix a rare case with missing data in the result of a distributed query. [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
+* Fix the order of parsing metadata fields in StorageDeltaLake. [#66211](https://github.com/ClickHouse/ClickHouse/pull/66211) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Don't throw `TIMEOUT_EXCEEDED` for the `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix handling of the limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup, which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
+* Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix error reporting while copying to S3 or AzureBlobStorage. [#66295](https://github.com/ClickHouse/ClickHouse/pull/66295) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Prevent the watchdog from keeping descriptors of unlinked (rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Fix the bug that `LogicalExpressionOptimizerPass` lost the logical type of a constant. [#66344](https://github.com/ClickHouse/ClickHouse/pull/66344) ([pn](https://github.com/chloro-pn)).
+* Fix the `Column identifier is already registered` error with `group_by_use_nulls=true` and the new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix a possible incorrect result for queries joining and filtering a table with an external engine (like PostgreSQL), due to too-aggressive filter pushdown. From now on, conditions from the WHERE section won't be sent to the external database in the case of an outer join with an external table. [#66402](https://github.com/ClickHouse/ClickHouse/pull/66402) ([vdimir](https://github.com/vdimir)).
+* Added missing column materialization for cross join. [#66413](https://github.com/ClickHouse/ClickHouse/pull/66413) ([lgbo](https://github.com/lgbo-ustc)).
+* Fix the `Cannot find column` error for queries with a constant expression in the `GROUP BY` key and the new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Avoid a possible logical error during import from the Npy format in case of a bad array nesting level; fix testing of other kinds of errors. [#66461](https://github.com/ClickHouse/ClickHouse/pull/66461) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
+* Fix a wrong count() result when there is a non-deterministic function in the predicate. [#66510](https://github.com/ClickHouse/ClickHouse/pull/66510) ([Duc Canh Le](https://github.com/canhld94)).
+* Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
+* Fix reading of uninitialized memory when hashing empty tuples. [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
+* Fix `column_length` not being updated in `ColumnTuple::insertManyFrom`. [#66626](https://github.com/ClickHouse/ClickHouse/pull/66626) ([lgbo](https://github.com/lgbo-ustc)).
+* Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix the `Method getResultType is not supported for QUERY query node` error when a scalar subquery was used as the first argument of IN (with the new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix a possible PARAMETER_OUT_OF_BOUND error during reading of a variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix a rare case of a stuck merge after dropping a column. [#66707](https://github.com/ClickHouse/ClickHouse/pull/66707) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix the `isUniqTypes` assertion when using INSERT SELECT from remote sources. [#66722](https://github.com/ClickHouse/ClickHouse/pull/66722) ([Sema Checherinda](https://github.com/CheSema)).
+* Fix a logical error in PrometheusRequestHandler. [#66621](https://github.com/ClickHouse/ClickHouse/pull/66621) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix an `indexHint` function case found by the fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix AST formatting of 'create table b empty as a'. [#64951](https://github.com/ClickHouse/ClickHouse/pull/64951) ([Michael Kolupaev](https://github.com/al13n321)).
+
 ### <a id="246"></a> ClickHouse release 24.6, 2024-07-01

 #### Backward Incompatible Change

CMakeLists.txt
@ -187,14 +187,6 @@ else ()
     set(NO_WHOLE_ARCHIVE --no-whole-archive)
 endif ()

-if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
-    # Can be lld or ld-lld or lld-13 or /path/to/lld.
-    if (LINKER_NAME MATCHES "lld")
-        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index")
-        message (STATUS "Adding .gdb-index via --gdb-index linker option.")
-    endif ()
-endif()
-
 if (NOT (SANITIZE_COVERAGE OR WITH_COVERAGE)
     AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE"
         OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
@ -330,17 +322,21 @@ if (DISABLE_OMIT_FRAME_POINTER)
     set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer")
 endif()

+# Before you start hating your debugger because it refuses to show variables ('<optimized out>'), try building with -DDEBUG_O_LEVEL="0"
+# https://stackoverflow.com/questions/63386189/whats-the-difference-between-a-compilers-o0-option-and-og-option/63386263#63386263
+set(DEBUG_O_LEVEL "g" CACHE STRING "The -Ox level used for debug builds")
+
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
 set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
-set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
+set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")

 set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
 set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
-set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
+set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")

 set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
 set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
-set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
+set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")

 if (OS_DARWIN)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
@ -402,7 +398,7 @@ if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
     set(ENABLE_GWP_ASAN OFF)
 endif ()

-option (ENABLE_FIU "Enable Fiu" ON)
+option (ENABLE_LIBFIU "Enable libfiu" ON)

 option(WERROR "Enable -Werror compiler option" ON)

@ -428,12 +424,17 @@ if (NOT SANITIZE)
     set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
 endif()

-if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X) AND NOT SANITIZE)
-    # Slightly more efficient code can be generated
-    # It's disabled for ARM because otherwise ClickHouse cannot run on Android.
+if (NOT OS_ANDROID AND OS_LINUX AND NOT ARCH_S390X AND NOT SANITIZE)
+    # Using '-no-pie' builds executables with fixed addresses, resulting in slightly more efficient code
+    # and keeping binary addresses constant even with ASLR enabled.
+    # Disabled on Android as it requires PIE: https://source.android.com/docs/security/enhancements#android-5
+    # Disabled on IBM S390X due to build issues with 'no-pie'
+    # Disabled with sanitizers to avoid issues with maximum relocation size: https://github.com/ClickHouse/ClickHouse/pull/49145
     set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
     set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
     set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
+else ()
+    message (WARNING "ClickHouse is built as PIE, system.trace_log will contain invalid addresses after server restart.")
 endif ()

 if (ENABLE_TESTS)
README.md
@ -34,17 +34,13 @@ curl https://clickhouse.com/ | sh

 Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.

-* [v24.7 Community Call](https://clickhouse.com/company/events/v24-7-community-release-call) - Jul 30
+* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 29

 ## Upcoming Events

 Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.

-* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
-* [ClickHouse Cloud - Live Update Call](https://clickhouse.com/company/events/202407-cloud-update-live) - Jul 9
-* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9
-* [AWS Summit in New York](https://clickhouse.com/company/events/2024-07-awssummit-nyc) - Jul 10
-* [ClickHouse Meetup @ Klaviyo - Boston](https://www.meetup.com/clickhouse-boston-user-group/events/300907870) - Jul 11
+* MORE COMING SOON!

 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
SECURITY.md
@ -14,9 +14,10 @@ The following versions of ClickHouse server are currently supported with securit

 | Version | Supported |
 |:-|:-|
+| 24.7 | ✔️ |
 | 24.6 | ✔️ |
 | 24.5 | ✔️ |
-| 24.4 | ✔️ |
+| 24.4 | ❌ |
 | 24.3 | ✔️ |
 | 24.2 | ❌ |
 | 24.1 | ❌ |
@ -1,4 +1,4 @@
-add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:${COVERAGE_FLAGS}>)
+add_compile_options("$<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:${COVERAGE_FLAGS}>")

 if (USE_CLANG_TIDY)
     set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
base/base/CMakeLists.txt
@ -8,6 +8,8 @@ endif ()
 # when instantiated from JSON.cpp. Try again when libcxx(abi) and Clang are upgraded to 16.
 set (CMAKE_CXX_STANDARD 20)

+configure_file(GitHash.cpp.in GitHash.generated.cpp)
+
 set (SRCS
     argsToConfig.cpp
     cgroupsv2.cpp
@ -32,6 +34,8 @@ set (SRCS
     StringRef.cpp
     safeExit.cpp
     throwError.cpp
+    Numa.cpp
+    GitHash.generated.cpp
 )

 add_library (common ${SRCS})
@ -46,6 +50,10 @@ if (TARGET ch_contrib::crc32_s390x)
     target_link_libraries(common PUBLIC ch_contrib::crc32_s390x)
 endif()

+if (TARGET ch_contrib::numactl)
+    target_link_libraries(common PUBLIC ch_contrib::numactl)
+endif()
+
 target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")

 target_link_libraries (common
37 base/base/Numa.cpp Normal file
@ -0,0 +1,37 @@
#include <base/Numa.h>

#include "config.h"

#if USE_NUMACTL
#    include <numa.h>
#endif

namespace DB
{

std::optional<size_t> getNumaNodesTotalMemory()
{
    std::optional<size_t> total_memory;
#if USE_NUMACTL
    if (numa_available() != -1)
    {
        auto * membind = numa_get_membind();
        if (!numa_bitmask_equal(membind, numa_all_nodes_ptr))
        {
            total_memory.emplace(0);
            auto max_node = numa_max_node();
            for (int i = 0; i <= max_node; ++i)
            {
                if (numa_bitmask_isbitset(membind, i))
                    *total_memory += numa_node_size(i, nullptr);
            }
        }

        numa_bitmask_free(membind);
    }
#endif
    return total_memory;
}

}
12 base/base/Numa.h Normal file
@ -0,0 +1,12 @@
#pragma once

#include <optional>

namespace DB
{

/// Returns the total memory of the NUMA nodes the process is bound to.
/// If NUMA is not supported or the process can use all nodes, std::nullopt is returned.
std::optional<size_t> getNumaNodesTotalMemory();

}
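For orientation, here is a minimal sketch of how a caller might consume this API (the `effectiveMemoryOrZero` helper below is hypothetical; the actual integration point is the `getMemoryAmountOrZero` hunk further down in this diff):

```cpp
#include <cstdint>

#include <base/Numa.h>

/// Hypothetical caller: prefer the NUMA-bound total over the system-wide figure
/// when the process is restricted to a subset of NUMA nodes.
uint64_t effectiveMemoryOrZero(uint64_t system_wide_bytes)
{
    if (auto numa_total = DB::getNumaNodesTotalMemory(); numa_total.has_value())
        return *numa_total; /// bound to specific nodes: only their memory counts
    return system_wide_bytes; /// no binding (or no NUMA support): keep the system figure
}
```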
base/base/cgroupsv2.cpp
@ -27,27 +27,6 @@ bool cgroupsV2Enabled()
 #endif
 }

-bool cgroupsV2MemoryControllerEnabled()
-{
-#if defined(OS_LINUX)
-    chassert(cgroupsV2Enabled());
-    /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
-    /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
-    /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
-    fs::path cgroup_dir = cgroupV2PathOfProcess();
-    if (cgroup_dir.empty())
-        return false;
-    std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
-    if (!controllers_file.is_open())
-        return false;
-    std::string controllers;
-    std::getline(controllers_file, controllers);
-    return controllers.find("memory") != std::string::npos;
-#else
-    return false;
-#endif
-}
-
 fs::path cgroupV2PathOfProcess()
 {
 #if defined(OS_LINUX)
@ -71,3 +50,28 @@ fs::path cgroupV2PathOfProcess()
     return {};
 #endif
 }
+
+std::optional<std::string> getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name)
+{
+#if defined(OS_LINUX)
+    if (!cgroupsV2Enabled())
+        return {};
+
+    fs::path current_cgroup = cgroupV2PathOfProcess();
+    if (current_cgroup.empty())
+        return {};
+
+    /// Return the bottom-most nested file. If there is no such file at the current
+    /// level, try again at the parent level as settings are inherited.
+    while (current_cgroup != default_cgroups_mount.parent_path())
+    {
+        const auto path = current_cgroup / file_name;
+        if (fs::exists(path))
+            return {current_cgroup};
+        current_cgroup = current_cgroup.parent_path();
+    }
+    return {};
+#else
+    return {};
+#endif
+}
base/base/cgroupsv2.h
@ -1,6 +1,7 @@
 #pragma once

 #include <filesystem>
+#include <string_view>

 #if defined(OS_LINUX)
 /// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@ -11,11 +12,11 @@ static inline const std::filesystem::path default_cgroups_mount = "/sys/fs/cgrou
 /// Is cgroups v2 enabled on the system?
 bool cgroupsV2Enabled();

-/// Is the memory controller of cgroups v2 enabled on the system?
-/// Assumes that cgroupsV2Enabled() is enabled.
-bool cgroupsV2MemoryControllerEnabled();
-
 /// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
 /// Returns an empty path if the cgroup cannot be determined.
 /// Assumes that cgroupsV2Enabled() is enabled.
 std::filesystem::path cgroupV2PathOfProcess();
+
+/// Returns the most nested cgroup dir containing the specified file.
+/// If cgroups v2 is not enabled - returns an empty optional.
+std::optional<std::string> getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name);
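As a usage illustration, a small sketch of combining the new lookup with a file read (the `readCgroupsV2Setting` helper is hypothetical and not part of this diff):

```cpp
#include <fstream>
#include <optional>
#include <string>
#include <string_view>

#include <base/cgroupsv2.h>

/// Hypothetical helper: read the first line of a cgroups v2 file (e.g. "memory.max")
/// from the most nested cgroup that defines it; the lookup walks up the hierarchy
/// because settings are inherited from parent cgroups.
std::optional<std::string> readCgroupsV2Setting(std::string_view file_name)
{
    std::optional<std::string> dir = getCgroupsV2PathContainingFile(file_name);
    if (!dir.has_value())
        return {};

    std::ifstream file(*dir + "/" + std::string(file_name));
    std::string line;
    if (!file.is_open() || !std::getline(file, line))
        return {};
    return line;
}
```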
base/defines.h
@ -87,10 +87,13 @@
 #    define ASAN_POISON_MEMORY_REGION(a, b)
 #endif

-#if !defined(ABORT_ON_LOGICAL_ERROR)
-#if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || defined(UNDEFINED_BEHAVIOR_SANITIZER)
-#define ABORT_ON_LOGICAL_ERROR
-#endif
+/// We used to have only ABORT_ON_LOGICAL_ERROR macro, but most of its uses were actually in places where we didn't care about logical errors
+/// but wanted to check exactly if the current build type is debug or with sanitizer. This new macro is introduced to fix those places.
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
+#    if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) \
+        || defined(UNDEFINED_BEHAVIOR_SANITIZER)
+#        define DEBUG_OR_SANITIZER_BUILD
+#    endif
 #endif

 /// chassert(x) is similar to assert(x), but:
@ -101,7 +104,7 @@
 /// Also it makes sense to call abort() instead of __builtin_unreachable() in debug builds,
 /// because SIGABRT is easier to debug than SIGTRAP (the second one makes gdb crazy)
 #if !defined(chassert)
-#if defined(ABORT_ON_LOGICAL_ERROR)
+#    if defined(DEBUG_OR_SANITIZER_BUILD)
 // clang-format off
 #include <base/types.h>
 namespace DB
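To illustrate how the renamed macro is meant to be used, a hedged sketch (the `checkInvariant` function is hypothetical and not from this diff; ClickHouse itself uses `chassert` for this purpose):

```cpp
#include <cassert>

/// Hypothetical illustration: strict invariant checks are compiled in only for
/// debug and sanitizer builds, keyed off DEBUG_OR_SANITIZER_BUILD rather than
/// the old ABORT_ON_LOGICAL_ERROR name.
void checkInvariant(long size)
{
#if defined(DEBUG_OR_SANITIZER_BUILD)
    assert(size >= 0); /// aborts in debug/sanitizer builds if the invariant is broken
#else
    (void)size; /// release builds: the check is compiled out entirely
#endif
}
```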
@ -2,15 +2,14 @@
|
|||||||
|
|
||||||
#include <base/cgroupsv2.h>
|
#include <base/cgroupsv2.h>
|
||||||
#include <base/getPageSize.h>
|
#include <base/getPageSize.h>
|
||||||
|
#include <base/Numa.h>
|
||||||
|
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <stdexcept>
|
|
||||||
|
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
#include <sys/param.h>
|
#include <sys/param.h>
|
||||||
|
|
||||||
|
|
||||||
namespace
|
namespace
|
||||||
{
|
{
|
||||||
|
|
||||||
@ -20,9 +19,6 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
|
|||||||
if (!cgroupsV2Enabled())
|
if (!cgroupsV2Enabled())
|
||||||
return {};
|
return {};
|
||||||
|
|
||||||
if (!cgroupsV2MemoryControllerEnabled())
|
|
||||||
return {};
|
|
||||||
|
|
||||||
std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
|
std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
|
||||||
if (current_cgroup.empty())
|
if (current_cgroup.empty())
|
||||||
return {};
|
return {};
|
||||||
@ -63,6 +59,9 @@ uint64_t getMemoryAmountOrZero()
|
|||||||
|
|
||||||
uint64_t memory_amount = num_pages * page_size;
|
uint64_t memory_amount = num_pages * page_size;
|
||||||
|
|
||||||
|
if (auto total_numa_memory = DB::getNumaNodesTotalMemory(); total_numa_memory.has_value())
|
||||||
|
memory_amount = *total_numa_memory;
|
||||||
|
|
||||||
/// Respect the memory limit set by cgroups v2.
|
/// Respect the memory limit set by cgroups v2.
|
||||||
auto limit_v2 = getCgroupsV2MemoryLimit();
|
auto limit_v2 = getCgroupsV2MemoryLimit();
|
||||||
if (limit_v2.has_value() && *limit_v2 < memory_amount)
|
if (limit_v2.has_value() && *limit_v2 < memory_amount)
|
||||||
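To make the precedence explicit, a small model (hypothetical numbers, simplified signatures) of what getMemoryAmountOrZero() now computes: the NUMA total, when available, replaces the sysconf-derived amount, and the cgroup v2 limit can only lower the result.

    #include <cstdint>
    #include <cstdio>
    #include <optional>

    // Stand-in for the logic above, with the system probes replaced by parameters.
    uint64_t effectiveMemory(uint64_t num_pages, uint64_t page_size,
                             std::optional<uint64_t> numa_total,
                             std::optional<uint64_t> cgroup_limit_v2)
    {
        uint64_t memory_amount = num_pages * page_size;
        if (numa_total.has_value())
            memory_amount = *numa_total;          // NUMA total overrides
        if (cgroup_limit_v2.has_value() && *cgroup_limit_v2 < memory_amount)
            memory_amount = *cgroup_limit_v2;     // cgroup limit clamps down
        return memory_amount;
    }

    int main()
    {
        // 1 Mi pages of 4 KiB = 4 GiB; NUMA says 8 GiB; cgroup caps at 2 GiB.
        uint64_t result = effectiveMemory(1 << 20, 4096, 8ULL << 30, 2ULL << 30);
        std::printf("%llu\n", (unsigned long long) result); // prints 2147483648
    }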
@@ -18,6 +18,16 @@ if (GLIBC_COMPATIBILITY)
message (FATAL_ERROR "glibc_compatibility can only be used on x86_64 or aarch64.")
endif ()

+if (SANITIZE STREQUAL thread)
+# Disable TSAN instrumentation that conflicts with re-exec due to high ASLR entropy using getauxval
+# See longer comment in __auxv_init_procfs
+# In the case of tsan we need to make sure getauxval is not instrumented as that would introduce tsan
+# internal calls to functions that depend on a state that isn't initialized yet
+set_source_files_properties(
+musl/getauxval.c
+PROPERTIES COMPILE_FLAGS "-mllvm -tsan-instrument-func-entry-exit=false")
+endif()
+
# Need to omit frame pointers to match the performance of glibc
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")

@@ -75,6 +75,44 @@ unsigned long NO_SANITIZE_THREAD __getauxval_procfs(unsigned long type)
}
static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type)
{
+#if defined(__x86_64__) && defined(__has_feature)
+# if __has_feature(memory_sanitizer) || __has_feature(thread_sanitizer)
+/// Sanitizers are not compatible with high ASLR entropy, which is the default on modern Linux distributions, and
+/// to workaround this limitation, TSAN and MSAN (couldn't see other sanitizers doing the same), re-exec the binary
+/// without ASLR (see https://github.com/llvm/llvm-project/commit/0784b1eefa36d4acbb0dacd2d18796e26313b6c5)
+
+/// The problem we face is that, in order to re-exec, the sanitizer wants to use the original pathname in the call
+/// and to get its value it uses getauxval (https://github.com/llvm/llvm-project/blob/20eff684203287828d6722fc860b9d3621429542/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp#L985-L988).
+/// Since we provide getauxval ourselves (to minimize the version dependency on runtime glibc), we are the ones
+// being called and we fail horribly:
+///
+/// ==301455==ERROR: MemorySanitizer: SEGV on unknown address 0x2ffc6d721550 (pc 0x5622c1cc0073 bp 0x000000000003 sp 0x7ffc6d721530 T301455)
+/// ==301455==The signal is caused by a WRITE memory access.
+/// #0 0x5622c1cc0073 in __auxv_init_procfs ./ClickHouse/base/glibc-compatibility/musl/getauxval.c:129:5
+/// #1 0x5622c1cbffe9 in getauxval ./ClickHouse/base/glibc-compatibility/musl/getauxval.c:240:12
+/// #2 0x5622c0d7bfb4 in __sanitizer::ReExec() crtstuff.c
+/// #3 0x5622c0df7bfc in __msan::InitShadowWithReExec(bool) crtstuff.c
+/// #4 0x5622c0d95356 in __msan_init (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x256356) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741)
+/// #5 0x5622c0dfe878 in msan.module_ctor main.cc
+/// #6 0x5622c1cc156c in __libc_csu_init (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x118256c) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741)
+/// #7 0x73dc05dd7ea3 in __libc_start_main /usr/src/debug/glibc/glibc/csu/../csu/libc-start.c:343:6
+/// #8 0x5622c0d6b7cd in _start (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x22c7cd) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741)
+
+/// The source of the issue above is that, at this point in time during __msan_init, we can't really do much as
+/// most global variables aren't initialized or available yet, so we can't initiate the auxiliary vector.
+/// Normal glibc / musl getauxval doesn't have this problem since they initiate their auxval vector at the very
+/// start of __libc_start_main (just keeping track of argv+argc+1), but we don't have such option (otherwise
+/// this complexity of reading "/proc/self/auxv" or using __environ would not be necessary).
+
+/// To avoid this crashes on the re-exec call (see above how it would fail when creating `aux`, and if we used
+/// __auxv_init_environ then it would SIGSEV on READing `__environ`) we capture this call for `AT_EXECFN` and
+/// unconditionally return "/proc/self/exe" without any preparation. Theoretically this should be fine in
+/// our case, as we don't load any libraries. That's the theory at least.
+if (type == AT_EXECFN)
+return (unsigned long)"/proc/self/exe";
+# endif
+#endif
+
// For debugging:
// - od -t dL /proc/self/auxv
// - LD_SHOW_AUX= ls
@@ -199,7 +237,7 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_environ(unsigned long type)
// - __auxv_init_procfs -> __auxv_init_environ -> __getauxval_environ
static void * volatile getauxval_func = (void *)__auxv_init_procfs;

-unsigned long getauxval(unsigned long type)
+unsigned long NO_SANITIZE_THREAD getauxval(unsigned long type)
{
return ((unsigned long (*)(unsigned long))getauxval_func)(type);
}
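The dispatch trick visible above (getauxval_func starting out pointing at __auxv_init_procfs) is worth spelling out. Here is a simplified, self-contained C++ analogue of the pattern, with all names invented for the sketch:

    #include <cstdio>

    // The first call goes through an "init" function that resolves the real
    // implementation and swaps the dispatch pointer, so later calls skip
    // initialization entirely - mirroring getauxval_func above.
    static unsigned long real_impl(unsigned long type)
    {
        return type * 2; // stand-in for reading /proc/self/auxv
    }

    static unsigned long init_impl(unsigned long type);
    static unsigned long (*volatile dispatch)(unsigned long) = init_impl;

    static unsigned long init_impl(unsigned long type)
    {
        dispatch = real_impl; // one-time switch
        return real_impl(type);
    }

    int main()
    {
        std::printf("%lu %lu\n", dispatch(21), dispatch(21)); // 42 42
    }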
@@ -21,6 +21,7 @@
#include "Poco/Exception.h"
#include "Poco/Foundation.h"
#include "Poco/Mutex.h"
+#include "Poco/Message.h"


namespace Poco
@@ -78,6 +79,10 @@ public:
///
/// The default implementation just breaks into the debugger.

+virtual void logMessageImpl(Message::Priority priority, const std::string & msg) {}
+/// Write a messages to the log
+/// Useful for logging from Poco
+
static void handle(const Exception & exc);
/// Invokes the currently registered ErrorHandler.

@@ -87,6 +92,9 @@ public:
static void handle();
/// Invokes the currently registered ErrorHandler.

+static void logMessage(Message::Priority priority, const std::string & msg);
+/// Invokes the currently registered ErrorHandler to log a message.
+
static ErrorHandler * set(ErrorHandler * pHandler);
/// Registers the given handler as the current error handler.
///
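A hedged sketch of how the two new members pair up: a subclass overrides logMessageImpl(), is installed via set(), and messages routed through the static logMessage() reach it. The class name and output format below are assumptions for illustration:

    #include "Poco/ErrorHandler.h"
    #include <iostream>

    class StderrErrorHandler : public Poco::ErrorHandler
    {
    public:
        void logMessageImpl(Poco::Message::Priority priority, const std::string & msg) override
        {
            std::cerr << "[poco prio " << static_cast<int>(priority) << "] " << msg << '\n';
        }
    };

    int main()
    {
        StderrErrorHandler handler;
        Poco::ErrorHandler * previous = Poco::ErrorHandler::set(&handler);
        Poco::ErrorHandler::logMessage(Poco::Message::PRIO_WARNING, "demo message");
        Poco::ErrorHandler::set(previous); // restore before `handler` goes out of scope
    }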
@@ -19,6 +19,7 @@

#include "Poco/Foundation.h"
+#include <Poco/Types.h>


namespace Poco
@@ -135,6 +136,12 @@ public:
static const UUID & x500();
/// Returns the namespace identifier for the X500 namespace.

+UInt32 getTimeLow() const { return _timeLow; }
+UInt16 getTimeMid() const { return _timeMid; }
+UInt16 getTimeHiAndVersion() const { return _timeHiAndVersion; }
+UInt16 getClockSeq() const { return _clockSeq; }
+std::array<UInt8, 6> getNode() const { return std::array<UInt8, 6>{_node[0], _node[1], _node[2], _node[3], _node[4], _node[5]}; }
+
protected:
UUID(UInt32 timeLow, UInt32 timeMid, UInt32 timeHiAndVersion, UInt16 clockSeq, UInt8 node[]);
UUID(const char * bytes, Version version);
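A small usage sketch for the new accessors. Per RFC 4122 the UUID version sits in the top four bits of time_hi_and_version, so the getter makes it directly extractable; the shift below is illustrative, not part of the commit:

    #include "Poco/UUID.h"
    #include <cstdio>

    int main()
    {
        // The DNS namespace UUID is a version-1 UUID.
        const Poco::UUID & uuid = Poco::UUID::dns();
        unsigned version = uuid.getTimeHiAndVersion() >> 12;
        std::printf("version: %u, clock_seq: %u\n", version, (unsigned) uuid.getClockSeq());
    }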
@@ -8,7 +8,7 @@
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


@@ -35,79 +35,91 @@ ErrorHandler::~ErrorHandler()

void ErrorHandler::exception(const Exception& exc)
{
poco_debugger_msg(exc.what());
}


void ErrorHandler::exception(const std::exception& exc)
{
poco_debugger_msg(exc.what());
}


void ErrorHandler::exception()
{
poco_debugger_msg("unknown exception");
}


void ErrorHandler::handle(const Exception& exc)
{
FastMutex::ScopedLock lock(_mutex);
try
{
_pHandler->exception(exc);
}
catch (...)
{
}
}


void ErrorHandler::handle(const std::exception& exc)
{
FastMutex::ScopedLock lock(_mutex);
try
{
_pHandler->exception(exc);
}
catch (...)
{
}
}


void ErrorHandler::handle()
{
FastMutex::ScopedLock lock(_mutex);
try
{
_pHandler->exception();
}
catch (...)
{
}
+}
+
+
+void ErrorHandler::logMessage(Message::Priority priority, const std::string & msg)
+{
+FastMutex::ScopedLock lock(_mutex);
+try
+{
+_pHandler->logMessageImpl(priority, msg);
+}
+catch (...)
+{
+}
}


ErrorHandler* ErrorHandler::set(ErrorHandler* pHandler)
{
poco_check_ptr(pHandler);

FastMutex::ScopedLock lock(_mutex);
ErrorHandler* pOld = _pHandler;
_pHandler = pHandler;
return pOld;
}


ErrorHandler* ErrorHandler::defaultHandler()
{
// NOTE: Since this is called to initialize the static _pHandler
// variable, sh has to be a local static, otherwise we run
// into static initialization order issues.
static SingletonHolder<ErrorHandler> sh;
return sh.get();
}
@@ -76,13 +76,13 @@ std::string Binary::toString(int indent) const

UUID Binary::uuid() const
{
-if (_subtype == 0x04 && _buffer.size() == 16)
+if ((_subtype == 0x04 || _subtype == 0x03) && _buffer.size() == 16)
{
UUID uuid;
uuid.copyFrom((const char*) _buffer.begin());
return uuid;
}
-throw BadCastException("Invalid subtype");
+throw BadCastException("Invalid subtype: " + std::to_string(_subtype) + ", size: " + std::to_string(_buffer.size()));
}

@@ -58,6 +58,10 @@ namespace Net

void setKeepAliveTimeout(Poco::Timespan keepAliveTimeout);

+size_t getKeepAliveTimeout() const { return _keepAliveTimeout.totalSeconds(); }
+
+size_t getMaxKeepAliveRequests() const { return _maxKeepAliveRequests; }
+
private:
bool _firstRequest;
Poco::Timespan _keepAliveTimeout;
@@ -19,11 +19,11 @@ namespace Poco {
namespace Net {


-HTTPServerSession::HTTPServerSession(const StreamSocket& socket, HTTPServerParams::Ptr pParams):
-HTTPSession(socket, pParams->getKeepAlive()),
-_firstRequest(true),
-_keepAliveTimeout(pParams->getKeepAliveTimeout()),
-_maxKeepAliveRequests(pParams->getMaxKeepAliveRequests())
+HTTPServerSession::HTTPServerSession(const StreamSocket & socket, HTTPServerParams::Ptr pParams)
+: HTTPSession(socket, pParams->getKeepAlive())
+, _firstRequest(true)
+, _keepAliveTimeout(pParams->getKeepAliveTimeout())
+, _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests())
{
setTimeout(pParams->getTimeout());
}
@@ -52,11 +52,12 @@ bool HTTPServerSession::hasMoreRequests()
}
else if (_maxKeepAliveRequests != 0 && getKeepAlive())
{
if (_maxKeepAliveRequests > 0)
--_maxKeepAliveRequests;
return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ);
}
-else return false;
+else
+return false;
}

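A toy model of the keep-alive budget in hasMoreRequests(), with the socket machinery stripped out: a budget of 0 disables keep-alive, a negative budget means unlimited, and a positive budget is spent one request at a time. All names here are invented for the sketch:

    #include <cstdio>

    bool hasMoreRequests(int & budget, bool keep_alive, bool data_buffered)
    {
        if (budget != 0 && keep_alive)
        {
            if (budget > 0)
                --budget;
            return data_buffered; // the real code also polls the socket within _keepAliveTimeout
        }
        return false;
    }

    int main()
    {
        int budget = 2;
        std::printf("%d", hasMoreRequests(budget, true, true));   // 1, budget -> 1
        std::printf("%d", hasMoreRequests(budget, true, true));   // 1, budget -> 0
        std::printf("%d\n", hasMoreRequests(budget, true, true)); // 0, budget exhausted
    }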
@@ -17,6 +17,7 @@
#include "Poco/Net/StreamSocketImpl.h"
#include "Poco/NumberFormatter.h"
#include "Poco/Timestamp.h"
+#include "Poco/ErrorHandler.h"
#include <string.h> // FD_SET needs memset on some platforms, so we can't use <cstring>

@@ -8,7 +8,7 @@
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


@@ -44,190 +44,194 @@ TCPServerConnectionFilter::~TCPServerConnectionFilter()


TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::UInt16 portNumber, TCPServerParams::Ptr pParams):
_socket(ServerSocket(portNumber)),
_thread(threadName(_socket)),
_stopped(true)
{
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
if (pParams)
{
int toAdd = pParams->getMaxThreads() - pool.capacity();
if (toAdd > 0) pool.addCapacity(toAdd);
}
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);

}


TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, const ServerSocket& socket, TCPServerParams::Ptr pParams):
_socket(socket),
_thread(threadName(socket)),
_stopped(true)
{
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
if (pParams)
{
int toAdd = pParams->getMaxThreads() - pool.capacity();
if (toAdd > 0) pool.addCapacity(toAdd);
}
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
}


TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, const ServerSocket& socket, TCPServerParams::Ptr pParams):
_socket(socket),
_pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)),
_thread(threadName(socket)),
_stopped(true)
{
}


TCPServer::~TCPServer()
{
try
{
stop();
_pDispatcher->release();
}
catch (...)
{
poco_unexpected();
}
}


const TCPServerParams& TCPServer::params() const
{
return _pDispatcher->params();
}


void TCPServer::start()
{
poco_assert (_stopped);

_stopped = false;
_thread.start(*this);
}


void TCPServer::stop()
{
if (!_stopped)
{
_stopped = true;
_thread.join();
_pDispatcher->stop();
}
}


void TCPServer::run()
{
while (!_stopped)
{
Poco::Timespan timeout(250000);
try
{
if (_socket.poll(timeout, Socket::SELECT_READ))
{
try
{
StreamSocket ss = _socket.acceptConnection();

if (!_pConnectionFilter || _pConnectionFilter->accept(ss))
{
// enable nodelay per default: OSX really needs that
#if defined(POCO_OS_FAMILY_UNIX)
if (ss.address().family() != AddressFamily::UNIX_LOCAL)
#endif
{
ss.setNoDelay(true);
}
_pDispatcher->enqueue(ss);
}
+else
+{
+ErrorHandler::logMessage(Message::PRIO_WARNING, "Filtered out connection from " + ss.peerAddress().toString());
+}
}
catch (Poco::Exception& exc)
{
ErrorHandler::handle(exc);
}
catch (std::exception& exc)
{
ErrorHandler::handle(exc);
}
catch (...)
{
ErrorHandler::handle();
}
}
}
catch (Poco::Exception& exc)
{
ErrorHandler::handle(exc);
// possibly a resource issue since poll() failed;
// give some time to recover before trying again
Poco::Thread::sleep(50);
}
}
}
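The newly logged "Filtered out connection" branch fires when an installed TCPServerConnectionFilter rejects a socket. A minimal sketch of such a filter, assuming a loopback-only policy chosen purely for illustration:

    #include "Poco/Net/TCPServer.h"
    #include "Poco/Net/StreamSocket.h"

    // Hypothetical filter: accept only loopback peers. Rejections now surface
    // in the log via the PRIO_WARNING message added to TCPServer::run() above.
    class LoopbackOnlyFilter : public Poco::Net::TCPServerConnectionFilter
    {
    public:
        bool accept(const Poco::Net::StreamSocket & socket) override
        {
            return socket.peerAddress().host().isLoopback();
        }
    };

    int main()
    {
        Poco::Net::TCPServerConnectionFilter::Ptr filter = new LoopbackOnlyFilter;
        // Installed on a stopped server, typically right before start():
        // server.setConnectionFilter(filter);
        (void) filter;
    }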

int TCPServer::currentThreads() const
{
return _pDispatcher->currentThreads();
}


int TCPServer::maxThreads() const
{
return _pDispatcher->maxThreads();
}


int TCPServer::totalConnections() const
{
return _pDispatcher->totalConnections();
}


int TCPServer::currentConnections() const
{
return _pDispatcher->currentConnections();
}


int TCPServer::maxConcurrentConnections() const
{
return _pDispatcher->maxConcurrentConnections();
}


int TCPServer::queuedConnections() const
{
return _pDispatcher->queuedConnections();
}


int TCPServer::refusedConnections() const
{
return _pDispatcher->refusedConnections();
}


void TCPServer::setConnectionFilter(const TCPServerConnectionFilter::Ptr& pConnectionFilter)
{
poco_assert (_stopped);

_pConnectionFilter = pConnectionFilter;
}


std::string TCPServer::threadName(const ServerSocket& socket)
{
std::string name("TCPServer: ");
name.append(socket.address().toString());
return name;
}

@@ -8,7 +8,7 @@
// Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


@@ -33,44 +33,44 @@ namespace Net {
class TCPConnectionNotification: public Notification
{
public:
TCPConnectionNotification(const StreamSocket& socket):
_socket(socket)
{
}

~TCPConnectionNotification()
{
}

const StreamSocket& socket() const
{
return _socket;
}

private:
StreamSocket _socket;
};


TCPServerDispatcher::TCPServerDispatcher(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, TCPServerParams::Ptr pParams):
_rc(1),
_pParams(pParams),
_currentThreads(0),
_totalConnections(0),
_currentConnections(0),
_maxConcurrentConnections(0),
_refusedConnections(0),
_stopped(false),
_pConnectionFactory(pFactory),
_threadPool(threadPool)
{
poco_check_ptr (pFactory);

if (!_pParams)
_pParams = new TCPServerParams;

if (_pParams->getMaxThreads() == 0)
_pParams->setMaxThreads(threadPool.capacity());
}


@@ -81,161 +81,184 @@ TCPServerDispatcher::~TCPServerDispatcher()

void TCPServerDispatcher::duplicate()
{
++_rc;
}


void TCPServerDispatcher::release()
{
if (--_rc == 0) delete this;
}


void TCPServerDispatcher::run()
{
AutoPtr<TCPServerDispatcher> guard(this); // ensure object stays alive

int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds();

for (;;)
{
try
{
AutoPtr<Notification> pNf = _queue.waitDequeueNotification(idleTime);
if (pNf && !_stopped)
{
TCPConnectionNotification* pCNf = dynamic_cast<TCPConnectionNotification*>(pNf.get());
if (pCNf)
{
beginConnection();
if (!_stopped)
{
std::unique_ptr<TCPServerConnection> pConnection(_pConnectionFactory->createConnection(pCNf->socket()));
poco_check_ptr(pConnection.get());
pConnection->start();
}
/// endConnection() should be called after destroying TCPServerConnection,
/// otherwise currentConnections() could become zero while some connections are yet still alive.
endConnection();
}
}
}
catch (Poco::Exception &exc) { ErrorHandler::handle(exc); }
catch (std::exception &exc) { ErrorHandler::handle(exc); }
catch (...) { ErrorHandler::handle(); }
FastMutex::ScopedLock lock(_mutex);
if (_stopped || (_currentThreads > 1 && _queue.empty()))
{
--_currentThreads;
break;
}
}
}


namespace
{
static const std::string threadName("TCPServerConnection");
}


void TCPServerDispatcher::enqueue(const StreamSocket& socket)
{
FastMutex::ScopedLock lock(_mutex);

+ErrorHandler::logMessage(Message::PRIO_TEST, "Queue size: " + std::to_string(_queue.size()) +
+", current threads: " + std::to_string(_currentThreads) +
+", threads in pool: " + std::to_string(_threadPool.allocated()) +
+", current connections: " + std::to_string(_currentConnections));
+
if (_queue.size() < _pParams->getMaxQueued())
{
+/// NOTE: the condition below is wrong.
+/// Since the thread pool is shared between multiple servers/TCPServerDispatchers,
+/// _currentThreads < _pParams->getMaxThreads() will be true when the pool is actually saturated.
+/// As a result, queue is useless and connections never wait in queue.
+/// Instead, we (mistakenly) think that we can create a thread for this connection, but we fail to create it
+/// and the connection get rejected.
+/// We could check _currentThreads < _threadPool.allocated() to make it work,
+/// but it's not clear if we want to make it work
+/// because it may be better to reject connection immediately if we don't have resources to handle it.
if (!_queue.hasIdleThreads() && _currentThreads < _pParams->getMaxThreads())
{
try
{
this->duplicate();
_threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName);
++_currentThreads;
}
catch (Poco::Exception& exc)
{
+ErrorHandler::logMessage(Message::PRIO_WARNING, "Got an exception while starting thread for connection from " +
+socket.peerAddress().toString());
+ErrorHandler::handle(exc);
this->release();
++_refusedConnections;
-std::cerr << "Got exception while starting thread for connection. Error code: "
-<< exc.code() << ", message: '" << exc.displayText() << "'" << std::endl;
return;
}
}
+else if (!_queue.hasIdleThreads())
+{
+ErrorHandler::logMessage(Message::PRIO_TRACE, "Don't have idle threads, adding connection from " +
+socket.peerAddress().toString() + " to the queue, size: " + std::to_string(_queue.size()));
+}
_queue.enqueueNotification(new TCPConnectionNotification(socket));
}
else
{
+ErrorHandler::logMessage(Message::PRIO_WARNING, "Refusing connection from " + socket.peerAddress().toString() +
+", reached max queue size " + std::to_string(_pParams->getMaxQueued()));
++_refusedConnections;
}
}
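The NOTE added above deserves a concrete illustration. A toy model with hypothetical numbers showing how a per-dispatcher bound can claim capacity that a shared pool no longer has:

    #include <cstdio>

    // Two dispatchers share one pool of 16 threads. Each one compares its own
    // thread count against its own maxThreads, so it believes capacity remains
    // even when the pool as a whole is saturated.
    int main()
    {
        const int pool_capacity = 16;
        const int pool_used = 16;           // saturated by both dispatchers together
        const int dispatcher_threads = 8;   // this dispatcher's share
        const int dispatcher_max = 16;      // per-dispatcher limit

        bool thinks_it_can_start = dispatcher_threads < dispatcher_max;   // true
        bool pool_can_actually_start = pool_used < pool_capacity;         // false

        std::printf("dispatcher check: %d, pool reality: %d\n",
                    thinks_it_can_start, pool_can_actually_start);
        // startWithPriority() would throw here, so the connection gets refused
        // instead of waiting in the queue.
    }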

void TCPServerDispatcher::stop()
{
_stopped = true;
_queue.clear();
_queue.wakeUpAll();
}


int TCPServerDispatcher::currentThreads() const
{
return _currentThreads;
}

int TCPServerDispatcher::maxThreads() const
{
FastMutex::ScopedLock lock(_mutex);

return _threadPool.capacity();
}


int TCPServerDispatcher::totalConnections() const
{
return _totalConnections;
}


int TCPServerDispatcher::currentConnections() const
{
return _currentConnections;
}


int TCPServerDispatcher::maxConcurrentConnections() const
{
return _maxConcurrentConnections;
}


int TCPServerDispatcher::queuedConnections() const
{
return _queue.size();
}


int TCPServerDispatcher::refusedConnections() const
{
return _refusedConnections;
}


void TCPServerDispatcher::beginConnection()
{
FastMutex::ScopedLock lock(_mutex);

++_totalConnections;
++_currentConnections;
if (_currentConnections > _maxConcurrentConnections)
_maxConcurrentConnections.store(_currentConnections);
}


void TCPServerDispatcher::endConnection()
{
--_currentConnections;
}

@@ -261,6 +261,11 @@ namespace Util
///
/// Throws a NullPointerException if no Application instance exists.

+static Application * instanceRawPtr();
+/// Returns a raw pointer to the Application singleton.
+///
+/// The caller should check whether the result is nullptr.
+
const Poco::Timestamp & startTime() const;
/// Returns the application start time (UTC).

@@ -448,6 +453,12 @@ namespace Util
}


+inline Application * Application::instanceRawPtr()
+{
+return _pInstance;
+}
+
+
inline const Poco::Timestamp & Application::startTime() const
{
return _startTime;
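A brief sketch of the intended contrast with Application::instance(), which throws when no instance exists; the helper function below is hypothetical:

    #include "Poco/Util/Application.h"
    #include <iostream>
    #include <string>

    // Log through the application when one exists, fall back to stderr
    // otherwise. instanceRawPtr() never throws, unlike instance().
    void logSomewhere(const std::string & msg)
    {
        if (Poco::Util::Application * app = Poco::Util::Application::instanceRawPtr())
            app->logger().information(msg);
        else
            std::clog << msg << '\n';
    }

    int main()
    {
        logSomewhere("called before any Application is constructed");
    }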
@@ -2,11 +2,11 @@

# NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54488)
+SET(VERSION_REVISION 54490)
SET(VERSION_MAJOR 24)
-SET(VERSION_MINOR 7)
+SET(VERSION_MINOR 9)
SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH aa023477a9265e403982fca5ee29a714db5133d9)
+SET(VERSION_GITHASH e02b434d2fc0c4fbee29ca675deab7474d274608)
-SET(VERSION_DESCRIBE v24.7.1.1-testing)
+SET(VERSION_DESCRIBE v24.9.1.1-testing)
-SET(VERSION_STRING 24.7.1.1)
+SET(VERSION_STRING 24.9.1.1)
# end of autochange
@@ -42,19 +42,9 @@ endif ()
# But use 2 parallel jobs, since:
# - this is what llvm does
# - and I've verfied that lld-11 does not use all available CPU time (in peak) while linking one binary
-if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO)
-if (ARCH_AARCH64)
-# aarch64 builds start to often fail with OOMs (reason not yet clear), for now let's limit the concurrency
-message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.")
-set (PARALLEL_LINK_JOBS 1)
-if (LINKER_NAME MATCHES "lld")
-math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4)
-set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}")
-endif()
-elseif (PARALLEL_LINK_JOBS GREATER 2)
-message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
-set (PARALLEL_LINK_JOBS 2)
-endif ()
+if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2)
+message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
+set (PARALLEL_LINK_JOBS 2)
endif()

message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).")
@@ -57,7 +57,8 @@ option(WITH_COVERAGE "Instrumentation for code coverage with default implementat

if (WITH_COVERAGE)
message (STATUS "Enabled instrumentation for code coverage")
-set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping")
+set (COVERAGE_FLAGS -fprofile-instr-generate -fcoverage-mapping)
+set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
endif()

option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)
12 contrib/CMakeLists.txt vendored
@@ -71,7 +71,6 @@ add_contrib (zlib-ng-cmake zlib-ng)
add_contrib (bzip2-cmake bzip2)
add_contrib (minizip-ng-cmake minizip-ng)
add_contrib (snappy-cmake snappy)
-add_contrib (rocksdb-cmake rocksdb)
add_contrib (thrift-cmake thrift)
# parquet/arrow/orc
add_contrib (arrow-cmake arrow) # requires: snappy, thrift, double-conversion
@@ -148,6 +147,7 @@ add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arro
add_contrib (cppkafka-cmake cppkafka)
add_contrib (libpqxx-cmake libpqxx)
add_contrib (libpq-cmake libpq)
+add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing
add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float)
add_contrib (idna-cmake idna)
@@ -179,7 +179,7 @@ else()
message(STATUS "Not using QPL")
endif ()

-if (OS_LINUX AND ARCH_AMD64)
+if (OS_LINUX AND ARCH_AMD64 AND NOT NO_SSE3_OR_HIGHER)
option (ENABLE_QATLIB "Enable Intel® QuickAssist Technology Library (QATlib)" ${ENABLE_LIBRARIES})
elseif(ENABLE_QATLIB)
message (${RECONFIGURE_MESSAGE_LEVEL} "QATLib is only supported on x86_64")
@@ -205,14 +205,12 @@ add_contrib (morton-nd-cmake morton-nd)
if (ARCH_S390X)
add_contrib(crc32-s390x-cmake crc32-s390x)
endif()
-add_contrib (annoy-cmake annoy)

-option(ENABLE_USEARCH "Enable USearch (Approximate Neighborhood Search, HNSW) support" ${ENABLE_LIBRARIES})
+option(ENABLE_USEARCH "Enable USearch" ${ENABLE_LIBRARIES})
if (ENABLE_USEARCH)
add_contrib (FP16-cmake FP16)
-add_contrib (robin-map-cmake robin-map)
add_contrib (SimSIMD-cmake SimSIMD)
-add_contrib (usearch-cmake usearch) # requires: FP16, robin-map, SimdSIMD
+add_contrib (usearch-cmake usearch) # requires: FP16, SimdSIMD
else ()
message(STATUS "Not using USearch")
endif ()
@@ -230,6 +228,8 @@ add_contrib (libssh-cmake libssh)

add_contrib (prometheus-protobufs-cmake prometheus-protobufs prometheus-protobufs-gogo)

+add_contrib(numactl-cmake numactl)
+
# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
# in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually,
2 contrib/NuRaft vendored
@@ -1 +1 @@
-Subproject commit cb5dc3c906e80f253e9ce9535807caef827cc2e0
+Subproject commit c2b0811f164a7948208489562dab4f186eb305ce
@@ -27,7 +27,7 @@ if (ENABLE_QAT_OUT_OF_TREE_BUILD)
${QAT_AL_INCLUDE_DIR}
${QAT_USDM_INCLUDE_DIR}
${ZSTD_LIBRARY_DIR})
-target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC)
+target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0)
add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
else () # In-tree build
message(STATUS "Intel QATZSTD in-tree build")
@@ -78,7 +78,7 @@ else () # In-tree build
${QAT_USDM_INCLUDE_DIR}
${ZSTD_LIBRARY_DIR}
${LIBQAT_HEADER_DIR})
-target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC -DINTREE)
+target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DINTREE)
target_include_directories(_qatzstd_plugin SYSTEM PUBLIC $<BUILD_INTERFACE:${QATZSTD_SRC_DIR}> $<INSTALL_INTERFACE:include>)
add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
endif ()
2 contrib/SimSIMD vendored
@@ -1 +1 @@
-Subproject commit de2cb75b9e9e3389d5e1e51fd9f8ed151f3c17cf
+Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26
1 contrib/annoy vendored
@@ -1 +0,0 @@
-Subproject commit f2ac8e7b48f9a9cf676d3b58286e5455aba8e956
@@ -1,24 +0,0 @@
-option(ENABLE_ANNOY "Enable Annoy index support" ${ENABLE_LIBRARIES})
-
-# Annoy index should be disabled with undefined sanitizer. Because of memory storage optimizations
-# (https://github.com/ClickHouse/annoy/blob/9d8a603a4cd252448589e84c9846f94368d5a289/src/annoylib.h#L442-L463)
-# UBSan fails and leads to crash. Simmilar issue is already opened in Annoy repo
-# https://github.com/spotify/annoy/issues/456
-# Problem with aligment can lead to errors like
-# (https://stackoverflow.com/questions/46790550/c-undefined-behavior-strict-aliasing-rule-or-incorrect-alignment)
-# or will lead to crash on arm https://developer.arm.com/documentation/ka003038/latest
-# This issues should be resolved before annoy became non-experimental (--> setting "allow_experimental_annoy_index")
-if ((NOT ENABLE_ANNOY) OR (SANITIZE STREQUAL "undefined") OR (ARCH_AARCH64))
-message (STATUS "Not using annoy")
-return()
-endif()
-
-set(ANNOY_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/annoy")
-set(ANNOY_SOURCE_DIR "${ANNOY_PROJECT_DIR}/src")
-
-add_library(_annoy INTERFACE)
-target_include_directories(_annoy SYSTEM INTERFACE ${ANNOY_SOURCE_DIR})
-
-add_library(ch_contrib::annoy ALIAS _annoy)
-target_compile_definitions(_annoy INTERFACE ENABLE_ANNOY)
-target_compile_definitions(_annoy INTERFACE ANNOYLIB_MULTITHREADED_BUILD)
2 contrib/aws vendored
@@ -1 +1 @@
-Subproject commit 1c2946bfcb7f1e3ae0a858de0b59d4f1a7b4ccaf
+Subproject commit d5450d76abda556ce145ddabe7e0cc6a7644ec59
2 contrib/aws-crt-cpp vendored
@@ -1 +1 @@
-Subproject commit f532d6abc0d2b0d8b5d6fe9e7c51eaedbe4afbd0
+Subproject commit e5aa45cacfdcda7719ead38760e7c61076f5745f
2 contrib/azure vendored
@@ -1 +1 @@
-Subproject commit ea3e19a7be08519134c643177d56c7484dfec884
+Subproject commit 67272b7ee0adff6b69921b26eb071ba1a353062c
@@ -9,6 +9,7 @@ set(DATASKETCHES_LIBRARY theta)
add_library(_datasketches INTERFACE)
target_include_directories(_datasketches SYSTEM BEFORE INTERFACE
"${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/common/include"
+"${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/count/include"
"${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/include")

add_library(ch_contrib::datasketches ALIAS _datasketches)
2 contrib/icu vendored
@@ -1 +1 @@
-Subproject commit a56dde820dc35665a66f2e9ee8ba58e75049b668
+Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625
@@ -12,8 +12,6 @@ endif()
set(ICU_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source")
set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/")

-set (CMAKE_CXX_STANDARD 17)
-
# These lists of sources were generated from build log of the original ICU build system (configure + make).

set(ICUUC_SOURCES
@@ -462,9 +460,9 @@ file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
enable_language(ASM)

if (ARCH_S390X)
-set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70b_dat.S" )
+set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75b_dat.S" )
else()
-set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70l_dat.S" )
+set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75l_dat.S" )
endif()

set(ICUDATA_SOURCES
2 contrib/icudata vendored
@@ -1 +1 @@
-Subproject commit c8e717892a557b4d2852317c7d628aacc0a0e5ab
+Subproject commit 4904951339a70b4814d2d3723436b20d079cb01b
@@ -1,20 +1,21 @@
-if (NOT ENABLE_FIU)
-message (STATUS "Not using fiu")
+if (NOT ENABLE_LIBFIU)
+message (STATUS "Not using libfiu")
return ()
endif ()

-set(FIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/")
+set(LIBFIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/")

-set(FIU_SOURCES
-${FIU_DIR}/libfiu/fiu.c
-${FIU_DIR}/libfiu/fiu-rc.c
-${FIU_DIR}/libfiu/backtrace.c
-${FIU_DIR}/libfiu/wtable.c
+set(LIBFIU_SOURCES
+${LIBFIU_DIR}/libfiu/fiu.c
+${LIBFIU_DIR}/libfiu/fiu-rc.c
+${LIBFIU_DIR}/libfiu/backtrace.c
+${LIBFIU_DIR}/libfiu/wtable.c
)

-set(FIU_HEADERS "${FIU_DIR}/libfiu")
+set(LIBFIU_HEADERS "${LIBFIU_DIR}/libfiu")

-add_library(_fiu ${FIU_SOURCES})
-target_compile_definitions(_fiu PUBLIC DUMMY_BACKTRACE)
-target_include_directories(_fiu PUBLIC ${FIU_HEADERS})
-add_library(ch_contrib::fiu ALIAS _fiu)
+add_library(_libfiu ${LIBFIU_SOURCES})
+target_compile_definitions(_libfiu PUBLIC DUMMY_BACKTRACE)
+target_compile_definitions(_libfiu PUBLIC FIU_ENABLE)
+target_include_directories(_libfiu PUBLIC ${LIBFIU_HEADERS})
+add_library(ch_contrib::libfiu ALIAS _libfiu)
2 contrib/libprotobuf-mutator vendored
@@ -1 +1 @@
-Subproject commit a304ec48dcf15d942607032151f7e9ee504b5dcf
+Subproject commit b922c8ab9004ef9944982e4f165e2747b13223fa
2 contrib/librdkafka vendored
@@ -1 +1 @@
-Subproject commit 2d2aab6f5b79db1cfca15d7bf0dee75d00d82082
+Subproject commit 39d4ed49ccf3406e2bf825d5d7b0903b5a290782
2 contrib/libunwind vendored
@@ -1 +1 @@
-Subproject commit 8f28e64d15819d2d096badd598c7d85bebddb1f2
+Subproject commit 601db0b0e03018c01710470a37703b618f9cf08b
1 contrib/numactl vendored Submodule
@@ -0,0 +1 @@
+Subproject commit 8d13d63a05f0c3cd88bf777cbb61541202b7da08
30 contrib/numactl-cmake/CMakeLists.txt Normal file
@@ -0,0 +1,30 @@
+if (NOT (
+OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_LOONGARCH64))
+)
+if (ENABLE_NUMACTL)
+message (${RECONFIGURE_MESSAGE_LEVEL}
+"numactl is disabled implicitly because the OS or architecture is not supported. Use -DENABLE_NUMACTL=0")
+endif ()
+set (ENABLE_NUMACTL OFF)
+else()
+option (ENABLE_NUMACTL "Enable numactl" ${ENABLE_LIBRARIES})
+endif()
+
+if (NOT ENABLE_NUMACTL)
+message (STATUS "Not using numactl")
+return()
+endif ()
+
+set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/numactl")
+
+set (SRCS
+"${LIBRARY_DIR}/libnuma.c"
+"${LIBRARY_DIR}/syscall.c"
+)
+
+add_library(_numactl ${SRCS})
+
+target_include_directories(_numactl SYSTEM PRIVATE include)
+target_include_directories(_numactl SYSTEM PUBLIC "${LIBRARY_DIR}")
+
+add_library(ch_contrib::numactl ALIAS _numactl)

contrib/numactl-cmake/include/config.h (new file, 82 lines)

@@ -0,0 +1,82 @@
+/* config.h. Generated from config.h.in by configure. */
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+/* Checking for symver attribute */
+#define HAVE_ATTRIBUTE_SYMVER 0
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdio.h> header file. */
+#define HAVE_STDIO_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to the sub-directory where libtool stores uninstalled libraries. */
+#define LT_OBJDIR ".libs/"
+
+/* Name of package */
+#define PACKAGE "numactl"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "numactl"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "numactl 2.1"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "numactl"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "2.1"
+
+/* Define to 1 if all of the C89 standard headers exist (not just the ones
+required in a freestanding environment). This macro is provided for
+backward compatibility; new code need not use it. */
+#define STDC_HEADERS 1
+
+/* If the compiler supports a TLS storage class define it to that here */
+#define TLS __thread
+
+/* Version number of package */
+#define VERSION "2.1"
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define to 1 on platforms where this makes off_t a 64-bit type. */
+/* #undef _LARGE_FILES */
+
+/* Number of bits in time_t, on hosts where this is settable. */
+/* #undef _TIME_BITS */
+
+/* Define to 1 on platforms where this makes time_t a 64-bit type. */
+/* #undef __MINGW_USE_VC2005_COMPAT */

contrib/qpl vendored (2 changes)

@@ -1 +1 @@
-Subproject commit d4715e0e79896b85612158e135ee1a85f3b3e04d
+Subproject commit c2ced94c53c1ee22191201a59878e9280bc9b9b8

@@ -4,7 +4,6 @@ set (QPL_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl")
 set (QPL_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl/sources")
 set (QPL_BINARY_DIR "${ClickHouse_BINARY_DIR}/build/contrib/qpl")
 set (EFFICIENT_WAIT OFF)
-set (BLOCK_ON_FAULT ON)
 set (LOG_HW_INIT OFF)
 set (SANITIZE_MEMORY OFF)
 set (SANITIZE_THREADS OFF)

@@ -16,16 +15,20 @@ function(GetLibraryVersion _content _outputVar)
 SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE)
 endfunction()

-set (QPL_VERSION 1.2.0)
+set (QPL_VERSION 1.6.0)

 message(STATUS "Intel QPL version: ${QPL_VERSION}")

-# There are 5 source subdirectories under $QPL_SRC_DIR: isal, c_api, core-sw, middle-layer, c_api.
-# Generate 8 library targets: middle_layer_lib, isal, isal_asm, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, core_iaa, middle_layer_lib.
+# There are 5 source subdirectories under $QPL_SRC_DIR: c_api, core-iaa, core-sw, middle-layer and isal.
+# Generate 8 library targets: qpl_c_api, core_iaa, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, middle_layer_lib, isal and isal_asm,
+# which are then combined into static or shared qpl.
 # Output ch_contrib::qpl by linking with 8 library targets.

-# The qpl submodule comes with its own version of isal. It contains code which does not exist in upstream isal. It would be nice to link
-# only upstream isal (ch_contrib::isal) but at this point we can't.
+# Note, QPL has integrated a customized version of ISA-L to meet specific needs.
+# This version has been significantly modified and there are no plans to maintain compatibility with the upstream version
+# or upgrade the current copy.

+## cmake/CompileOptions.cmake and automatic wrappers generation
+
 # ==========================================================================
 # Copyright (C) 2022 Intel Corporation

@@ -442,6 +445,7 @@ function(generate_unpack_kernel_arrays current_directory PLATFORMS_LIST)
 endforeach()
 endfunction()

+# [SUBDIR]isal

 enable_language(ASM_NASM)

@@ -479,7 +483,6 @@ set(ISAL_ASM_SRC ${QPL_SRC_DIR}/isal/igzip/igzip_body.asm
 ${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_04.asm
 ${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_06.asm
 ${QPL_SRC_DIR}/isal/igzip/igzip_multibinary.asm
-${QPL_SRC_DIR}/isal/igzip/stdmac.asm
 ${QPL_SRC_DIR}/isal/crc/crc_multibinary.asm
 ${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8.asm
 ${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8_02.asm

@@ -505,7 +508,6 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
 # Setting external and internal interfaces for ISA-L library
 target_include_directories(isal
 PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/isal/include>
-PRIVATE ${QPL_SRC_DIR}/isal/include
 PUBLIC ${QPL_SRC_DIR}/isal/igzip)

 set_target_properties(isal PROPERTIES

@@ -617,12 +619,9 @@ target_compile_options(qplcore_sw_dispatcher

 # [SUBDIR]core-iaa
 file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c
-${QPL_SRC_DIR}/core-iaa/sources/aecs/*.cpp
 ${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.c
-${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.cpp
 ${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.c
-${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.cpp
-${QPL_SRC_DIR}/core-iaa/sources/bit_rev.c)
+${QPL_SRC_DIR}/core-iaa/sources/*.c)

 # Create library
 add_library(core_iaa OBJECT ${HW_PATH_SRC})

@@ -634,31 +633,27 @@ target_include_directories(core_iaa
 PRIVATE ${UUID_DIR}
 PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/include>
 PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/sources/include>
 PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include> # status.h in own_checkers.h
-PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/sources/c_api> # own_checkers.h
+PRIVATE $<TARGET_PROPERTY:qpl_c_api,INTERFACE_INCLUDE_DIRECTORIES> # for own_checkers.h
 PRIVATE $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>)

 target_compile_features(core_iaa PRIVATE c_std_11)

 target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK
-PRIVATE $<$<BOOL:${BLOCK_ON_FAULT}>: BLOCK_ON_FAULT_ENABLED>
 PRIVATE $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>
 PRIVATE $<$<BOOL:${DYNAMIC_LOADING_LIBACCEL_CONFIG}>:DYNAMIC_LOADING_LIBACCEL_CONFIG>)

 # [SUBDIR]middle-layer
 file(GLOB MIDDLE_LAYER_SRC
-${QPL_SRC_DIR}/middle-layer/analytics/*.cpp
-${QPL_SRC_DIR}/middle-layer/c_wrapper/*.cpp
-${QPL_SRC_DIR}/middle-layer/checksum/*.cpp
+${QPL_SRC_DIR}/middle-layer/accelerator/*.cpp
+${QPL_SRC_DIR}/middle-layer/analytics/*.cpp
 ${QPL_SRC_DIR}/middle-layer/common/*.cpp
 ${QPL_SRC_DIR}/middle-layer/compression/*.cpp
 ${QPL_SRC_DIR}/middle-layer/compression/*/*.cpp
 ${QPL_SRC_DIR}/middle-layer/compression/*/*/*.cpp
 ${QPL_SRC_DIR}/middle-layer/dispatcher/*.cpp
 ${QPL_SRC_DIR}/middle-layer/other/*.cpp
-${QPL_SRC_DIR}/middle-layer/util/*.cpp
-${QPL_SRC_DIR}/middle-layer/inflate/*.cpp
-${QPL_SRC_DIR}/core-iaa/sources/accelerator/*.cpp) # todo
+${QPL_SRC_DIR}/middle-layer/util/*.cpp)

 add_library(middle_layer_lib OBJECT
 ${MIDDLE_LAYER_SRC})

@@ -667,6 +662,7 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
 $<TARGET_OBJECTS:middle_layer_lib>)

 target_compile_options(middle_layer_lib
+PRIVATE $<$<C_COMPILER_ID:GNU,Clang>:$<$<CONFIG:Release>:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>>
 PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})

 target_compile_definitions(middle_layer_lib

@@ -682,6 +678,7 @@ target_include_directories(middle_layer_lib
 PRIVATE ${UUID_DIR}
 PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/middle-layer>
 PUBLIC $<TARGET_PROPERTY:_qpl,INTERFACE_INCLUDE_DIRECTORIES>
+PRIVATE $<TARGET_PROPERTY:qpl_c_api,INTERFACE_INCLUDE_DIRECTORIES>
 PUBLIC $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>
 PUBLIC $<TARGET_PROPERTY:isal,INTERFACE_INCLUDE_DIRECTORIES>
 PUBLIC $<TARGET_PROPERTY:core_iaa,INTERFACE_INCLUDE_DIRECTORIES>)

@@ -689,31 +686,50 @@ target_include_directories(middle_layer_lib
 target_compile_definitions(middle_layer_lib PUBLIC -DQPL_LIB)

 # [SUBDIR]c_api
-file(GLOB_RECURSE QPL_C_API_SRC
-${QPL_SRC_DIR}/c_api/*.c
-${QPL_SRC_DIR}/c_api/*.cpp)
+file(GLOB QPL_C_API_SRC
+${QPL_SRC_DIR}/c_api/compression_operations/*.c
+${QPL_SRC_DIR}/c_api/compression_operations/*.cpp
+${QPL_SRC_DIR}/c_api/filter_operations/*.cpp
+${QPL_SRC_DIR}/c_api/legacy_hw_path/*.c
+${QPL_SRC_DIR}/c_api/legacy_hw_path/*.cpp
+${QPL_SRC_DIR}/c_api/other_operations/*.cpp
+${QPL_SRC_DIR}/c_api/serialization/*.cpp
+${QPL_SRC_DIR}/c_api/*.cpp)
+
+add_library(qpl_c_api OBJECT ${QPL_C_API_SRC})
+
+target_include_directories(qpl_c_api
+PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api/>
+PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/include/> $<INSTALL_INTERFACE:include>
+PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>)
+
+set_target_properties(qpl_c_api PROPERTIES
+$<$<C_COMPILER_ID:GNU,Clang>:C_STANDARD 17
+CXX_STANDARD 17)
+
+target_compile_options(qpl_c_api
+PRIVATE $<$<C_COMPILER_ID:GNU,Clang>:$<$<CONFIG:Release>:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>>
+PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU,Clang>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)
+
+target_compile_definitions(qpl_c_api
+PUBLIC -DQPL_BADARG_CHECK # own_checkers.h
+PUBLIC -DQPL_LIB # needed for middle_layer_lib
+PUBLIC $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>) # needed for middle_layer_lib
+
+set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
+$<TARGET_OBJECTS:qpl_c_api>)
+
+# Final _qpl target
+
 get_property(LIB_DEPS GLOBAL PROPERTY QPL_LIB_DEPS)

-add_library(_qpl STATIC ${QPL_C_API_SRC} ${LIB_DEPS})
+add_library(_qpl STATIC ${LIB_DEPS})

 target_include_directories(_qpl
-PUBLIC $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include/> $<INSTALL_INTERFACE:include>
-PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>
-PRIVATE $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api>)
+PUBLIC $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include/> $<INSTALL_INTERFACE:include>)

-target_compile_options(_qpl
-PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
-
-target_compile_definitions(_qpl
-PRIVATE -DQPL_LIB
-PRIVATE -DQPL_BADARG_CHECK
-PRIVATE $<$<BOOL:${DYNAMIC_LOADING_LIBACCEL_CONFIG}>:DYNAMIC_LOADING_LIBACCEL_CONFIG>
-PUBLIC -DENABLE_QPL_COMPRESSION)

 target_link_libraries(_qpl
-PRIVATE ch_contrib::accel-config
-PRIVATE ch_contrib::isal)
+PRIVATE ch_contrib::accel-config)

 target_include_directories(_qpl SYSTEM BEFORE
 PUBLIC "${QPL_PROJECT_DIR}/include"

contrib/robin-map vendored (1 change, submodule removed)

@@ -1 +0,0 @@
-Subproject commit 851a59e0e3063ee0e23089062090a73fd3de482d

@@ -1 +0,0 @@
-# See contrib/usearch-cmake/CMakeLists.txt

contrib/rocksdb vendored (2 changes)

@@ -1 +1 @@
-Subproject commit be366233921293bd07a84dc4ea6991858665f202
+Subproject commit 5f003e4a22d2e48e37c98d9620241237cd30dd24

@@ -1,56 +1,46 @@
 option (ENABLE_ROCKSDB "Enable RocksDB" ${ENABLE_LIBRARIES})

-if (NOT ENABLE_ROCKSDB)
+if (NOT ENABLE_ROCKSDB OR NO_SSE3_OR_HIGHER) # assumes SSE4.2 and PCLMUL
 message (STATUS "Not using RocksDB")
 return()
 endif()

-# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
-option(WITH_JEMALLOC "build with JeMalloc" OFF)
-
-option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING
-
 # ClickHouse cannot be compiled without snappy, lz4, zlib, zstd
 option(WITH_SNAPPY "build with SNAPPY" ON)
 option(WITH_LZ4 "build with lz4" ON)
 option(WITH_ZLIB "build with zlib" ON)
 option(WITH_ZSTD "build with zstd" ON)

-# third-party/folly is only validated to work on Linux and Windows for now.
-# So only turn it on there by default.
-if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
-option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
-else()
-option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
-endif()
+if (ENABLE_JEMALLOC AND OS_LINUX) # gives compile errors with jemalloc enabled for rocksdb on non-Linux
+add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
+list (APPEND THIRDPARTY_LIBS ch_contrib::jemalloc)
+endif ()

-if(WITH_SNAPPY)
+if (ENABLE_LIBURING)
+add_definitions(-DROCKSDB_IOURING_PRESENT)
+list (APPEND THIRDPARTY_LIBS ch_contrib::liburing)
+endif ()
+
+if (WITH_SNAPPY)
 add_definitions(-DSNAPPY)
 list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
 endif()

-if(WITH_ZLIB)
+if (WITH_ZLIB)
 add_definitions(-DZLIB)
 list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
 endif()

-if(WITH_LZ4)
+if (WITH_LZ4)
 add_definitions(-DLZ4)
 list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
 endif()

-if(WITH_ZSTD)
+if (WITH_ZSTD)
 add_definitions(-DZSTD)
 list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
 endif()

-option(PORTABLE "build a portable binary" ON)
-
-if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
-add_definitions(-DHAVE_SSE42)
-add_definitions(-DHAVE_PCLMUL)
-endif()
-
 if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
 set (HAS_ARMV8_CRC 1)
 # the original build descriptions set specific flags for ARM. These flags are already subsumed by ClickHouse's general

@@ -59,11 +49,6 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
 # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
 endif()

-set (HAVE_THREAD_LOCAL 1)
-if(HAVE_THREAD_LOCAL)
-add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
-endif()
-
 if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
 add_definitions(-DOS_MACOSX)
 elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")

@@ -89,19 +74,24 @@ set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")

 include_directories(${ROCKSDB_SOURCE_DIR})
 include_directories("${ROCKSDB_SOURCE_DIR}/include")
-if(WITH_FOLLY_DISTRIBUTED_MUTEX)
-include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
-endif()

 set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/cache/cache.cc
 ${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
 ${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc
+${ROCKSDB_SOURCE_DIR}/cache/cache_helpers.cc
 ${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc
+${ROCKSDB_SOURCE_DIR}/cache/charged_cache.cc
 ${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
+${ROCKSDB_SOURCE_DIR}/cache/compressed_secondary_cache.cc
 ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
+${ROCKSDB_SOURCE_DIR}/cache/secondary_cache.cc
+${ROCKSDB_SOURCE_DIR}/cache/secondary_cache_adapter.cc
 ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
+${ROCKSDB_SOURCE_DIR}/cache/tiered_secondary_cache.cc
 ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
+${ROCKSDB_SOURCE_DIR}/db/attribute_group_iterator_impl.cc
+${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc
 ${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc
 ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
 ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc

@@ -113,9 +103,11 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
 ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
 ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
+${ROCKSDB_SOURCE_DIR}/db/blob/blob_source.cc
 ${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
 ${ROCKSDB_SOURCE_DIR}/db/builder.cc
 ${ROCKSDB_SOURCE_DIR}/db/c.cc
+${ROCKSDB_SOURCE_DIR}/db/coalescing_iterator.cc
 ${ROCKSDB_SOURCE_DIR}/db/column_family.cc
 ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc
 ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc

@@ -124,7 +116,11 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc
 ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc
 ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc
+${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_service_job.cc
+${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_state.cc
+${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_outputs.cc
 ${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc
+${ROCKSDB_SOURCE_DIR}/db/compaction/subcompaction_state.cc
 ${ROCKSDB_SOURCE_DIR}/db/convenience.cc
 ${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc
 ${ROCKSDB_SOURCE_DIR}/db/db_impl/compacted_db_impl.cc

@@ -132,6 +128,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc
 ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc
 ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc
+${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_follower.cc
 ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc
 ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc
 ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc

@@ -159,10 +156,11 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc
 ${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc
 ${ROCKSDB_SOURCE_DIR}/db/output_validator.cc
-${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc
+${ROCKSDB_SOURCE_DIR}/db/periodic_task_scheduler.cc
 ${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc
 ${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc
 ${ROCKSDB_SOURCE_DIR}/db/repair.cc
+${ROCKSDB_SOURCE_DIR}/db/seqno_to_time_mapping.cc
 ${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc
 ${ROCKSDB_SOURCE_DIR}/db/table_cache.cc
 ${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc

@@ -174,17 +172,21 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/db/version_set.cc
 ${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc
 ${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc
+${ROCKSDB_SOURCE_DIR}/db/wide/wide_column_serialization.cc
+${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns.cc
+${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns_helper.cc
 ${ROCKSDB_SOURCE_DIR}/db/write_batch.cc
 ${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc
 ${ROCKSDB_SOURCE_DIR}/db/write_controller.cc
+${ROCKSDB_SOURCE_DIR}/db/write_stall_stats.cc
 ${ROCKSDB_SOURCE_DIR}/db/write_thread.cc
 ${ROCKSDB_SOURCE_DIR}/env/composite_env.cc
 ${ROCKSDB_SOURCE_DIR}/env/env.cc
 ${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc
 ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
-${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc
 ${ROCKSDB_SOURCE_DIR}/env/file_system.cc
 ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
+${ROCKSDB_SOURCE_DIR}/env/fs_on_demand.cc
 ${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
 ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
 ${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc

@@ -230,19 +232,21 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/options/configurable.cc
 ${ROCKSDB_SOURCE_DIR}/options/customizable.cc
 ${ROCKSDB_SOURCE_DIR}/options/db_options.cc
+${ROCKSDB_SOURCE_DIR}/options/offpeak_time_info.cc
 ${ROCKSDB_SOURCE_DIR}/options/options.cc
 ${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
 ${ROCKSDB_SOURCE_DIR}/options/options_parser.cc
+${ROCKSDB_SOURCE_DIR}/port/mmap.cc
 ${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc
 ${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc
 ${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc
 ${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc
-${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc
 ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc
 ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc
 ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc
 ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc
 ${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc
+${ROCKSDB_SOURCE_DIR}/table/block_based/block_cache.cc
 ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc
 ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc
 ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc

@@ -268,6 +272,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/table/get_context.cc
 ${ROCKSDB_SOURCE_DIR}/table/iterator.cc
 ${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc
+${ROCKSDB_SOURCE_DIR}/table/compaction_merging_iterator.cc
 ${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc
 ${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc
 ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc

@@ -300,27 +305,34 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc
 ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc
 ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
+${ROCKSDB_SOURCE_DIR}/util/async_file_reader.cc
+${ROCKSDB_SOURCE_DIR}/util/cleanable.cc
 ${ROCKSDB_SOURCE_DIR}/util/coding.cc
 ${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
 ${ROCKSDB_SOURCE_DIR}/util/comparator.cc
+${ROCKSDB_SOURCE_DIR}/util/compression.cc
 ${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
 ${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
 ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
+${ROCKSDB_SOURCE_DIR}/util/data_structure.cc
 ${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc
 ${ROCKSDB_SOURCE_DIR}/util/hash.cc
 ${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
 ${ROCKSDB_SOURCE_DIR}/util/random.cc
 ${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
-${ROCKSDB_SOURCE_DIR}/util/regex.cc
 ${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
 ${ROCKSDB_SOURCE_DIR}/util/slice.cc
 ${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
 ${ROCKSDB_SOURCE_DIR}/util/status.cc
+${ROCKSDB_SOURCE_DIR}/util/stderr_logger.cc
 ${ROCKSDB_SOURCE_DIR}/util/string_util.cc
 ${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
 ${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
+${ROCKSDB_SOURCE_DIR}/util/udt_util.cc
+${ROCKSDB_SOURCE_DIR}/util/write_batch_util.cc
 ${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
-${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc
+${ROCKSDB_SOURCE_DIR}/utilities/agg_merge/agg_merge.cc
+${ROCKSDB_SOURCE_DIR}/utilities/backup/backup_engine.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc

@@ -335,6 +347,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
+${ROCKSDB_SOURCE_DIR}/utilities/counted_fs.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc

@@ -361,6 +374,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
+${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_for_tiering_collector.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc

@@ -381,6 +395,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
+${ROCKSDB_SOURCE_DIR}/utilities/types_util.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc

@@ -399,12 +414,6 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
 build_version.cc) # generated by hand

-if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
-set_source_files_properties(
-"${ROCKSDB_SOURCE_DIR}/util/crc32c.cc"
-PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul")
-endif()
-
 if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
 list(APPEND SOURCES
 "${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc.c"

@@ -417,23 +426,18 @@ if(HAS_ARMV8_CRC)
 endif(HAS_ARMV8_CRC)

 list(APPEND SOURCES
-"${ROCKSDB_SOURCE_DIR}/port/port_posix.cc"
-"${ROCKSDB_SOURCE_DIR}/env/env_posix.cc"
-"${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc"
-"${ROCKSDB_SOURCE_DIR}/env/io_posix.cc")
+${ROCKSDB_SOURCE_DIR}/port/port_posix.cc
+${ROCKSDB_SOURCE_DIR}/env/env_posix.cc
+${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc
+${ROCKSDB_SOURCE_DIR}/env/io_posix.cc)

-if(WITH_FOLLY_DISTRIBUTED_MUTEX)
-list(APPEND SOURCES
-"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/detail/Futex.cpp"
-"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/AtomicNotification.cpp"
-"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/DistributedMutex.cpp"
-"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/ParkingLot.cpp"
-"${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp")
-endif()

 add_library(_rocksdb ${SOURCES})
 add_library(ch_contrib::rocksdb ALIAS _rocksdb)
 target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+
+# Not in the native build system but useful anyways:
+# Make all functions in xxHash.h inline. Beneficial for performance: https://github.com/Cyan4973/xxHash/tree/v0.8.2#build-modifiers
+target_compile_definitions (_rocksdb PRIVATE XXH_INLINE_ALL)

 # SYSTEM is required to overcome some issues
 target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")

@@ -1,16 +1,33 @@
 // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-/// This file was edited for ClickHouse.

 #include <memory>

 #include "rocksdb/version.h"
+#include "rocksdb/utilities/object_registry.h"
 #include "util/string_util.h"

 // The build script may replace these values with real values based
 // on whether or not GIT is available and the platform settings
-static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:0";
-static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:master";
-static const std::string rocksdb_build_date = "rocksdb_build_date:2000-01-01";
+static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:72438a678872544809393b831c7273794c074215";
+static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:main";
+#define HAS_GIT_CHANGES 0
+#if HAS_GIT_CHANGES == 0
+// If HAS_GIT_CHANGES is 0, the GIT date is used.
+// Use the time the branch/tag was last modified
+static const std::string rocksdb_build_date = "rocksdb_build_date:2024-07-12 16:01:57";
+#else
+// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications.
+// Use the time the build was created.
+static const std::string rocksdb_build_date = "rocksdb_build_date:2024-07-13 17:15:50";
+#endif
+
+extern "C" {
+
+} // extern "C"
+
+std::unordered_map<std::string, ROCKSDB_NAMESPACE::RegistrarFunc> ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = {
+
+};
+
 namespace ROCKSDB_NAMESPACE {
 static void AddProperty(std::unordered_map<std::string, std::string> *props, const std::string& name) {

@@ -39,12 +56,12 @@ const std::unordered_map<std::string, std::string>& GetRocksBuildProperties() {
 }

 std::string GetRocksVersionAsString(bool with_patch) {
-std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR);
+std::string version = std::to_string(ROCKSDB_MAJOR) + "." + std::to_string(ROCKSDB_MINOR);
 if (with_patch) {
-return version + "." + ToString(ROCKSDB_PATCH);
+return version + "." + std::to_string(ROCKSDB_PATCH);
 } else {
 return version;
 }
 }

 std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) {

contrib/usearch vendored (2 changes)

@@ -1 +1 @@
-Subproject commit 955c6f9c11adfd89c912e0d1643d160b4e9e543f
+Subproject commit e21a5778a0d4469ddaf38c94b7be0196bb701ee4

@@ -1,17 +1,22 @@
-set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")
-set(USEARCH_SOURCE_DIR "${USEARCH_PROJECT_DIR}/include")

 set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16")
-set(ROBIN_MAP_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/robin-map")
-set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD-map")
+set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
+set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")

 add_library(_usearch INTERFACE)

 target_include_directories(_usearch SYSTEM INTERFACE
 ${FP16_PROJECT_DIR}/include
-${ROBIN_MAP_PROJECT_DIR}/include
 ${SIMSIMD_PROJECT_DIR}/include
-${USEARCH_SOURCE_DIR})
+${USEARCH_PROJECT_DIR}/include)

+target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB)
+
+# target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD)
+# ^^ simsimd is not enabled at the moment. Reasons:
+# - Vectorization is important for raw scans but not so much for HNSW. We use usearch only for HNSW.
+# - Simsimd does compile-time dispatch (choice of SIMD kernels determined by capabilities of the build machine) or dynamic dispatch (SIMD
+# kernels chosen at runtime based on cpuid instruction). Since current builds are limited to SSE 4.2 (x86) and NEON (ARM), the speedup of
+# the former would be moderate compared to AVX-512 / SVE. The latter is at the moment too fragile with respect to portability across x86
+# and ARM machines ... certain conbinations of quantizations / distance functions / SIMD instructions are not implemented at the moment.
+
 add_library(ch_contrib::usearch ALIAS _usearch)
-target_compile_definitions(_usearch INTERFACE ENABLE_USEARCH)

contrib/zlib-ng vendored (2 changes)

@@ -1 +1 @@
-Subproject commit 50f0eae1a411764cd6d1e85b3ce471438acd3c1c
+Subproject commit a2fbeffdc30a8b0ce6d54ee31208e2688eac4c9f

@@ -14,6 +14,8 @@ add_definitions(-DHAVE_VISIBILITY_HIDDEN)
 add_definitions(-DHAVE_VISIBILITY_INTERNAL)
 add_definitions(-DHAVE_BUILTIN_CTZ)
 add_definitions(-DHAVE_BUILTIN_CTZLL)
+add_definitions(-DHAVE_ATTRIBUTE_ALIGNED)
+add_definitions(-DHAVE_POSIX_MEMALIGN)

 set(ZLIB_ARCH_SRCS)
 set(ZLIB_ARCH_HDRS)

@@ -24,67 +26,74 @@ if(ARCH_AARCH64)
 set(ARCHDIR "${SOURCE_DIR}/arch/arm")

 add_definitions(-DARM_FEATURES)
+add_definitions(-DHAVE_SYS_AUXV_H)
 add_definitions(-DARM_AUXV_HAS_CRC32 -DARM_ASM_HWCAP)
 add_definitions(-DARM_AUXV_HAS_NEON)
-add_definitions(-DARM_ACLE_CRC_HASH)
-add_definitions(-DARM_NEON_ADLER32 -DARM_NEON_CHUNKSET -DARM_NEON_SLIDEHASH)
+add_definitions(-DARM_ACLE)
+add_definitions(-DHAVE_ARM_ACLE_H)
+add_definitions(-DARM_NEON)
+add_definitions(-DARM_NEON_HASLD4)

-list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/arm.h)
-list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/armfeature.c)
+list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/arm_features.h)
+list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/arm_features.c)
 set(ACLE_SRCS ${ARCHDIR}/crc32_acle.c ${ARCHDIR}/insert_string_acle.c)
 list(APPEND ZLIB_ARCH_SRCS ${ACLE_SRCS})
-set(NEON_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/chunkset_neon.c ${ARCHDIR}/slide_neon.c)
+set(NEON_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/chunkset_neon.c
+${ARCHDIR}/compare256_neon.c ${ARCHDIR}/slide_hash_neon.c)
 list(APPEND ZLIB_ARCH_SRCS ${NEON_SRCS})

 elseif(ARCH_PPC64LE)
 set(ARCHDIR "${SOURCE_DIR}/arch/power")

-add_definitions(-DPOWER8)
 add_definitions(-DPOWER_FEATURES)
-add_definitions(-DPOWER8_VSX_ADLER32)
-add_definitions(-DPOWER8_VSX_SLIDEHASH)
+add_definitions(-DHAVE_SYS_AUXV_H)

-list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/power.h)
-list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power.c)
-set(POWER8_SRCS ${ARCHDIR}/adler32_power8.c ${ARCHDIR}/slide_hash_power8.c)
+if(POWER9)
+add_definitions(-DPOWER9)
+else()
+add_definitions(-DPOWER8)
+add_definitions(-DPOWER8_VSX)
+add_definitions(-DPOWER8_VSX_CRC32)
+endif()
+
+list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/power_features.h)
+list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power_features.c)
+set(POWER8_SRCS ${ARCHDIR}/adler32_power8.c ${ARCHDIR}/chunkset_power8.c ${ARCHDIR}/slide_hash_power8.c)
+list(APPEND POWER8_SRCS ${ARCHDIR}/crc32_power8.c)
 list(APPEND ZLIB_ARCH_SRCS ${POWER8_SRCS})

 elseif(ARCH_AMD64)
 set(ARCHDIR "${SOURCE_DIR}/arch/x86")

 add_definitions(-DX86_FEATURES)
-list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/x86.h)
-list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/x86.c)
+list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/x86_features.h)
+list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/x86_features.c)
 if(ENABLE_AVX2)
-add_definitions(-DX86_AVX2 -DX86_AVX2_ADLER32 -DX86_AVX_CHUNKSET)
-set(AVX2_SRCS ${ARCHDIR}/slide_avx.c)
-list(APPEND AVX2_SRCS ${ARCHDIR}/chunkset_avx.c)
-list(APPEND AVX2_SRCS ${ARCHDIR}/compare258_avx.c)
-list(APPEND AVX2_SRCS ${ARCHDIR}/adler32_avx.c)
+add_definitions(-DX86_AVX2)
+set(AVX2_SRCS ${ARCHDIR}/slide_hash_avx2.c)
+list(APPEND AVX2_SRCS ${ARCHDIR}/chunkset_avx2.c)
+list(APPEND AVX2_SRCS ${ARCHDIR}/compare256_avx2.c)
+list(APPEND AVX2_SRCS ${ARCHDIR}/adler32_avx2.c)
 list(APPEND ZLIB_ARCH_SRCS ${AVX2_SRCS})
 endif()
 if(ENABLE_SSE42)
-add_definitions(-DX86_SSE42_CRC_HASH)
-set(SSE42_SRCS ${ARCHDIR}/insert_string_sse.c)
-list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
-add_definitions(-DX86_SSE42_CRC_INTRIN)
-add_definitions(-DX86_SSE42_CMP_STR)
-set(SSE42_SRCS ${ARCHDIR}/compare258_sse.c)
+add_definitions(-DX86_SSE42)
+set(SSE42_SRCS ${ARCHDIR}/adler32_sse42.c ${ARCHDIR}/insert_string_sse42.c)
 list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
 endif()
 if(ENABLE_SSSE3)
-add_definitions(-DX86_SSSE3 -DX86_SSSE3_ADLER32)
-set(SSSE3_SRCS ${ARCHDIR}/adler32_ssse3.c)
+add_definitions(-DX86_SSSE3)
+set(SSSE3_SRCS ${ARCHDIR}/adler32_ssse3.c ${ARCHDIR}/chunkset_ssse3.c)
 list(APPEND ZLIB_ARCH_SRCS ${SSSE3_SRCS})
 endif()
 if(ENABLE_PCLMULQDQ)
 add_definitions(-DX86_PCLMULQDQ_CRC)
-set(PCLMULQDQ_SRCS ${ARCHDIR}/crc_folding.c)
+set(PCLMULQDQ_SRCS ${ARCHDIR}/crc32_pclmulqdq.c)
 list(APPEND ZLIB_ARCH_SRCS ${PCLMULQDQ_SRCS})
 endif()

-add_definitions(-DX86_SSE2 -DX86_SSE2_CHUNKSET -DX86_SSE2_SLIDEHASH)
-set(SSE2_SRCS ${ARCHDIR}/chunkset_sse.c ${ARCHDIR}/slide_sse.c)
+add_definitions(-DX86_SSE2)
+set(SSE2_SRCS ${ARCHDIR}/chunkset_sse2.c ${ARCHDIR}/compare256_sse2.c ${ARCHDIR}/slide_hash_sse2.c)
 list(APPEND ZLIB_ARCH_SRCS ${SSE2_SRCS})
 add_definitions(-DX86_NOCHECK_SSE2)
 endif ()

@@ -106,39 +115,45 @@ generate_cmakein(${SOURCE_DIR}/zconf.h.in ${CMAKE_CURRENT_BINARY_DIR}/zconf.h.cm

 set(ZLIB_SRCS
 ${SOURCE_DIR}/adler32.c
+${SOURCE_DIR}/adler32_fold.c
 ${SOURCE_DIR}/chunkset.c
-${SOURCE_DIR}/compare258.c
+${SOURCE_DIR}/compare256.c
 ${SOURCE_DIR}/compress.c
-${SOURCE_DIR}/crc32.c
-${SOURCE_DIR}/crc32_comb.c
+${SOURCE_DIR}/cpu_features.c
+${SOURCE_DIR}/crc32_braid.c
+${SOURCE_DIR}/crc32_braid_comb.c
+${SOURCE_DIR}/crc32_fold.c
 ${SOURCE_DIR}/deflate.c
 ${SOURCE_DIR}/deflate_fast.c
+${SOURCE_DIR}/deflate_huff.c
 ${SOURCE_DIR}/deflate_medium.c
 ${SOURCE_DIR}/deflate_quick.c
+${SOURCE_DIR}/deflate_rle.c
 ${SOURCE_DIR}/deflate_slow.c
+${SOURCE_DIR}/deflate_stored.c
 ${SOURCE_DIR}/functable.c
 ${SOURCE_DIR}/infback.c
-${SOURCE_DIR}/inffast.c
 ${SOURCE_DIR}/inflate.c
 ${SOURCE_DIR}/inftrees.c
 ${SOURCE_DIR}/insert_string.c
+${SOURCE_DIR}/insert_string_roll.c
+${SOURCE_DIR}/slide_hash.c
 ${SOURCE_DIR}/trees.c
 ${SOURCE_DIR}/uncompr.c
 ${SOURCE_DIR}/zutil.c
+)
+
+set(ZLIB_GZFILE_SRCS
 ${SOURCE_DIR}/gzlib.c
-${SOURCE_DIR}/gzread.c
+${CMAKE_CURRENT_BINARY_DIR}/gzread.c
 ${SOURCE_DIR}/gzwrite.c
 )

-set(ZLIB_ALL_SRCS ${ZLIB_SRCS} ${ZLIB_ARCH_SRCS})
+set(ZLIB_ALL_SRCS ${ZLIB_SRCS} ${ZLIB_ARCH_SRCS} ${ZLIB_GZFILE_SRCS})

 add_library(_zlib ${ZLIB_ALL_SRCS})
 add_library(ch_contrib::zlib ALIAS _zlib)

-# https://github.com/zlib-ng/zlib-ng/pull/733
-# This is disabed by default
-add_compile_definitions(Z_TLS=__thread)

 if(HAVE_UNISTD_H)
 SET(ZCONF_UNISTD_LINE "#if 1 /* was set to #if 1 by configure/cmake/etc */")
 else()

@@ -153,6 +168,9 @@ endif()
 set(ZLIB_PC ${CMAKE_CURRENT_BINARY_DIR}/zlib.pc)
 configure_file(${SOURCE_DIR}/zlib.pc.cmakein ${ZLIB_PC} @ONLY)
 configure_file(${CMAKE_CURRENT_BINARY_DIR}/zconf.h.cmakein ${CMAKE_CURRENT_BINARY_DIR}/zconf.h @ONLY)
+configure_file(${SOURCE_DIR}/zlib.h.in ${CMAKE_CURRENT_BINARY_DIR}/zlib.h @ONLY)
+configure_file(${SOURCE_DIR}/zlib_name_mangling.h.in ${CMAKE_CURRENT_BINARY_DIR}/zlib_name_mangling.h @ONLY)
+configure_file(${SOURCE_DIR}/gzread.c.in ${CMAKE_CURRENT_BINARY_DIR}/gzread.c @ONLY)

 # We should use same defines when including zlib.h as used when zlib compiled
 target_compile_definitions (_zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
|
|||||||
|
|
||||||
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
|
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
|
||||||
export CI=true
|
export CI=true
|
||||||
yarn install
|
|
||||||
exec yarn build "$@"
|
exec yarn build "$@"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
|
|||||||
# lts / testing / prestable / etc
|
# lts / testing / prestable / etc
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
||||||
ARG VERSION="24.6.2.17"
|
ARG VERSION="24.7.3.42"
|
||||||
ARG PACKAGES="clickhouse-keeper"
|
ARG PACKAGES="clickhouse-keeper"
|
||||||
ARG DIRECT_DOWNLOAD_URLS=""
|
ARG DIRECT_DOWNLOAD_URLS=""
|
||||||
|
|
||||||
|
@ -40,8 +40,6 @@ fi
|
|||||||
|
|
||||||
DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}"
|
DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}"
|
||||||
LOG_DIR="${LOG_DIR:-/var/log/clickhouse-keeper}"
|
LOG_DIR="${LOG_DIR:-/var/log/clickhouse-keeper}"
|
||||||
LOG_PATH="${LOG_DIR}/clickhouse-keeper.log"
|
|
||||||
ERROR_LOG_PATH="${LOG_DIR}/clickhouse-keeper.err.log"
|
|
||||||
COORDINATION_DIR="${DATA_DIR}/coordination"
|
COORDINATION_DIR="${DATA_DIR}/coordination"
|
||||||
COORDINATION_LOG_DIR="${DATA_DIR}/coordination/log"
|
COORDINATION_LOG_DIR="${DATA_DIR}/coordination/log"
|
||||||
COORDINATION_SNAPSHOT_DIR="${DATA_DIR}/coordination/snapshots"
|
COORDINATION_SNAPSHOT_DIR="${DATA_DIR}/coordination/snapshots"
|
||||||
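The entrypoint above derives every path through bash default expansion, so a deployment can relocate the whole tree by exporting a single variable. A minimal sketch of the "${VAR:-default}" idiom, using a variable name taken from the diff:

    #!/usr/bin/env bash
    # The default applies only when CLICKHOUSE_DATA_DIR is unset or empty.
    DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}"
    echo "$DATA_DIR"   # prints /var/lib/clickhouse unless CLICKHOUSE_DATA_DIR is set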
@@ -84,7 +82,7 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
 
     # There is a config file. It is already tested with gosu (if it is readably by keeper user)
     if [ -f "$KEEPER_CONFIG" ]; then
-        exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
+        exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
     fi
 
     # There is no config file. Will use embedded one
@@ -108,7 +108,8 @@ if [ -n "$MAKE_DEB" ]; then
     bash -x /build/packages/build
 fi
 
-mv ./programs/clickhouse* /output || mv ./programs/*_fuzzer /output
+mv ./programs/clickhouse* /output ||:
+mv ./programs/*_fuzzer /output ||:
 [ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
 [ -x ./programs/self-extracting/clickhouse-stripped ] && mv ./programs/self-extracting/clickhouse-stripped /output
 [ -x ./programs/self-extracting/clickhouse-keeper ] && mv ./programs/self-extracting/clickhouse-keeper /output
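The "||:" appended to the new mv commands is the usual shell trick for "ignore failure": ":" is bash's no-op builtin, so the command list succeeds even when mv has nothing to move, which matters in CI scripts running under "set -e". A minimal sketch (the paths are illustrative):

    #!/usr/bin/env bash
    set -e
    mv ./programs/no-such-binary /output ||:   # mv fails; ||: swallows the error
    echo "still running"                       # reached despite the failed mv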
@@ -1,3 +1,5 @@
+# docker build -t clickhouse/cctools .
+
 # This is a hack to significantly reduce the build time of the clickhouse/binary-builder
 # It's based on the assumption that we don't care of the cctools version so much
 # It event does not depend on the clickhouse/fasttest in the `docker/images.json`
@@ -30,5 +32,29 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \
     && cd ../.. \
     && rm -rf cctools-port
 
+#
+# GDB
+#
+# ld from binutils is 2.38, which has the following error:
+#
+#     DWARF error: invalid or unhandled FORM value: 0x23
+#
+ENV LD=ld.lld-${LLVM_VERSION}
+ARG GDB_VERSION=15.1
+RUN apt-get update \
+    && apt-get install --yes \
+        libgmp-dev \
+        libmpfr-dev \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
+RUN wget https://sourceware.org/pub/gdb/releases/gdb-$GDB_VERSION.tar.gz \
+    && tar -xvf gdb-$GDB_VERSION.tar.gz \
+    && cd gdb-$GDB_VERSION \
+    && ./configure --prefix=/opt/gdb \
+    && make -j $(nproc) \
+    && make install \
+    && rm -fr gdb-$GDB_VERSION gdb-$GDB_VERSION.tar.gz
+
 FROM scratch
 COPY --from=builder /cctools /cctools
+COPY --from=builder /opt/gdb /opt/gdb
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.6.2.17"
+ARG VERSION="24.7.3.42"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.6.2.17"
+ARG VERSION="24.7.3.42"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 #docker-official-library:off
@@ -23,17 +23,23 @@ RUN apt-get update \
 # and MEMORY_LIMIT_EXCEEDED exceptions in Functional tests (total memory limit in Functional tests is ~55.24 GiB).
 # TSAN will flush shadow memory when reaching this limit.
 # It may cause false-negatives, but it's better than OOM.
-RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'" >> /etc/environment
-RUN echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment
-RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment
-RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment
+# max_allocation_size_mb is set to 32GB, so we have much bigger chance to run into memory limit than the limitation of the sanitizers
+RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "ASAN_OPTIONS='halt_on_error=1 abort_on_error=1'" >> /etc/environment
 # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run")
 # (but w/o verbosity for TSAN, otherwise test.reference will not match)
-ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'
-ENV UBSAN_OPTIONS='print_stacktrace=1'
-ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
+ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768'
+ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'
+ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'
+ENV LSAN_OPTIONS='max_allocation_size_mb=32768'
+ENV ASAN_OPTIONS='halt_on_error=1 abort_on_error=1'
 
-# for external_symbolizer_path
+# for external_symbolizer_path, and also ensure that llvm-symbolizer really
+# exists (since you don't want to fallback to addr2line, it is very slow)
+RUN test -f /usr/bin/llvm-symbolizer-${LLVM_VERSION}
 RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer
 
 RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
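For context on the sanitizer hunk above: each sanitizer runtime reads its options from the matching *SAN_OPTIONS environment variable at process start, which is why the image writes them both into /etc/environment and as ENV defaults. A minimal sketch, assuming ./a.out is any binary built with -fsanitize=address (the binary name is illustrative):

    # With max_allocation_size_mb, a single allocation above 32 GiB fails with
    # a sanitizer report instead of driving the machine into OOM.
    export ASAN_OPTIONS='halt_on_error=1 abort_on_error=1 max_allocation_size_mb=32768'
    ./a.out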
@@ -218,6 +218,6 @@ function stop_logs_replication
     clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | {
         tee /dev/stderr
     } | {
-        xargs -n1 -r -i clickhouse-client --query "drop table {}"
+        timeout --preserve-status --signal TERM --kill-after 5m 15m xargs -n1 -r -i clickhouse-client --query "drop table {}"
     }
 }
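The timeout wrapper added above bounds the drop-table loop: SIGTERM after 15 minutes, escalation to SIGKILL 5 minutes later if the process ignores TERM, and --preserve-status makes timeout exit with the command's own status rather than the usual 124. A minimal sketch of those semantics with short durations:

    # sleep gets TERM after 2s; --kill-after would send KILL 5s later if needed.
    timeout --preserve-status --signal TERM --kill-after 5s 2s sleep 60
    echo "exit: $?"   # 143, i.e. 128+SIGTERM, because of --preserve-status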
@@ -83,7 +83,7 @@ RUN arch=${TARGETARCH:-amd64} \
 
 # Give suid to gdb to grant it attach permissions
 # chmod 777 to make the container user independent
-RUN chmod u+s /usr/bin/gdb \
+RUN chmod u+s /opt/gdb/bin/gdb \
     && mkdir -p /var/lib/clickhouse \
     && chmod 777 /var/lib/clickhouse
 
@@ -41,7 +41,7 @@ export FASTTEST_WORKSPACE
 export FASTTEST_SOURCE
 export FASTTEST_BUILD
 export FASTTEST_DATA
-export FASTTEST_OUT
+export FASTTEST_OUTPUT
 export PATH
 
 function ccache_status
@@ -256,19 +256,6 @@ function configure
     rm -f "$FASTTEST_DATA/config.d/secure_ports.xml"
 }
 
-function timeout_with_logging() {
-    local exit_code=0
-
-    timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
-
-    if [[ "${exit_code}" -eq "124" ]]
-    then
-        echo "The command 'timeout ${*}' has been killed by timeout"
-    fi
-
-    return $exit_code
-}
-
 function run_tests
 {
     clickhouse-server --version
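A note on the helper deleted above: because it invoked timeout with --preserve-status, a child killed by SIGTERM normally surfaced as exit code 143 (128+15) rather than 124, so the logging branch fired only in corner cases; the commit drops the wrapper and calls run_tests directly, as the next hunk shows. For reference, the removed call site looked like this:

    timeout_with_logging 35m bash -c run_tests ||: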
@@ -337,7 +324,7 @@ case "$stage" in
     configure 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt"
     ;&
 "run_tests")
-    timeout_with_logging 35m bash -c run_tests ||:
+    run_tests ||:
     /process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \
         --out-results-file "$FASTTEST_OUTPUT/test_results.tsv" \
         --out-status-file "$FASTTEST_OUTPUT/check_status.tsv" || echo -e "failure\tCannot parse results" > "$FASTTEST_OUTPUT/check_status.tsv"
@@ -20,7 +20,7 @@
     </max_execution_time>
 
     <max_memory_usage>
-        <max>10G</max>
+        <max>5G</max>
     </max_memory_usage>
 
     <table_function_remote_max_addresses>
@@ -28,9 +28,9 @@
     </table_function_remote_max_addresses>
 
     <!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
-    <allow_experimental_analyzer>
+    <enable_analyzer>
         <readonly/>
-    </allow_experimental_analyzer>
+    </enable_analyzer>
 
     <!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
     <allow_experimental_object_type>
@@ -193,54 +193,60 @@ function fuzz
 
     kill -0 $server_pid
 
-    # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
-    # and clickhouse-server can do fork-exec, for example, to run some bridge.
-    # Do not set nostop noprint for all signals, because some it may cause gdb to hang,
-    # explicitly ignore non-fatal signals that are used by server.
-    # Number of SIGRTMIN can be determined only in runtime.
-    RTMIN=$(kill -l SIGRTMIN)
-    echo "
-set follow-fork-mode parent
-handle SIGHUP nostop noprint pass
-handle SIGINT nostop noprint pass
-handle SIGQUIT nostop noprint pass
-handle SIGPIPE nostop noprint pass
-handle SIGTERM nostop noprint pass
-handle SIGUSR1 nostop noprint pass
-handle SIGUSR2 nostop noprint pass
-handle SIGSEGV nostop pass
-handle SIG$RTMIN nostop noprint pass
-info signals
-continue
-backtrace full
-thread apply all backtrace full
-info registers
-disassemble /s
-up
-disassemble /s
-up
-disassemble /s
-p \"done\"
-detach
-quit
-" > script.gdb
+    IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)")
+    if [[ "$IS_ASAN" = "1" ]];
+    then
+        echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
+    else
+        # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
+        # and clickhouse-server can do fork-exec, for example, to run some bridge.
+        # Do not set nostop noprint for all signals, because some it may cause gdb to hang,
+        # explicitly ignore non-fatal signals that are used by server.
+        # Number of SIGRTMIN can be determined only in runtime.
+        RTMIN=$(kill -l SIGRTMIN)
+        echo "
+set follow-fork-mode parent
+handle SIGHUP nostop noprint pass
+handle SIGINT nostop noprint pass
+handle SIGQUIT nostop noprint pass
+handle SIGPIPE nostop noprint pass
+handle SIGTERM nostop noprint pass
+handle SIGUSR1 nostop noprint pass
+handle SIGUSR2 nostop noprint pass
+handle SIG$RTMIN nostop noprint pass
+info signals
+continue
+backtrace full
+thread apply all backtrace full
+info registers
+disassemble /s
+up
+disassemble /s
+up
+disassemble /s
+p \"done\"
+detach
+quit
+" > script.gdb
 
     gdb -batch -command script.gdb -p $server_pid &
     sleep 5
     # gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s)
     time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
 
-    # Check connectivity after we attach gdb, because it might cause the server
-    # to freeze, and the fuzzer will fail. In debug build, it can take a lot of time.
-    for _ in {1..180}
-    do
-        if clickhouse-client --query "select 1"
-        then
-            break
-        fi
-        sleep 1
-    done
-    kill -0 $server_pid # This checks that it is our server that is started and not some other one
+        # Check connectivity after we attach gdb, because it might cause the server
+        # to freeze, and the fuzzer will fail. In debug build, it can take a lot of time.
+        for _ in {1..180}
+        do
+            if clickhouse-client --query "select 1"
+            then
+                break
+            fi
+            sleep 1
+        done
+        kill -0 $server_pid # This checks that it is our server that is started and not some other one
+    fi
+
     echo 'Server started and responded.'
 
     setup_logs_replication
@@ -265,8 +271,13 @@ quit
     # The fuzzer_pid belongs to the timeout process.
     actual_fuzzer_pid=$(ps -o pid= --ppid "$fuzzer_pid")
 
-    echo "Attaching gdb to the fuzzer itself"
-    gdb -batch -command script.gdb -p $actual_fuzzer_pid &
+    if [[ "$IS_ASAN" = "1" ]];
+    then
+        echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
+    else
+        echo "Attaching gdb to the fuzzer itself"
+        gdb -batch -command script.gdb -p $actual_fuzzer_pid &
+    fi
 
     # Wait for the fuzzer to complete.
     # Note that the 'wait || ...' thing is required so that the script doesn't
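The ASAN probe introduced in both branches above is plain SQL over the server's build metadata and can be reproduced from any shell against a running server; the query text below is taken verbatim from the diff:

    # Prints 1 when clickhouse-server was compiled with -fsanitize=address, 0 otherwise.
    clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)"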
@@ -11,7 +11,6 @@ RUN apt-get update \
     curl \
     default-jre \
     g++ \
-    gdb \
     iproute2 \
     krb5-user \
     libicu-dev \
@@ -73,3 +72,6 @@ maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg && \
 
 ENV TZ=Etc/UTC
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+
+COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+ENV PATH="/opt/gdb/bin:${PATH}"
@@ -30,7 +30,6 @@ RUN apt-get update \
     luajit \
     libssl-dev \
     libcurl4-openssl-dev \
-    gdb \
     default-jdk \
     software-properties-common \
     libkrb5-dev \
@@ -87,6 +86,8 @@ COPY modprobe.sh /usr/local/bin/modprobe
 COPY dockerd-entrypoint.sh /usr/local/bin/
 COPY misc/ /misc/
 
+COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+ENV PATH="/opt/gdb/bin:${PATH}"
+
 # Same options as in test/base/Dockerfile
 # (in case you need to override them in tests)
@@ -74,6 +74,7 @@ protobuf==4.25.2
 psycopg2-binary==2.9.6
 py4j==0.10.9.5
 py==1.11.0
+pyarrow==17.0.0
 pycparser==2.22
 pycryptodome==3.20.0
 pymongo==3.11.0
@@ -9,7 +9,6 @@ RUN apt-get update \
     curl \
     dmidecode \
     g++ \
-    gdb \
     git \
     gnuplot \
     imagemagick \
@@ -42,6 +41,9 @@ RUN pip3 --no-cache-dir install -r requirements.txt
 
 COPY run.sh /
 
+COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+ENV PATH="/opt/gdb/bin:${PATH}"
+
 CMD ["bash", "/run.sh"]
 
 # docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison
@@ -13,6 +13,7 @@ entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
 # https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
 # Double-escaped backslashes are a tribute to the engineering wonder of docker --
 # it gives '/bin/sh: 1: [bash,: not found' otherwise.
+numactl --hardware
 node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
 echo Will bind to NUMA node $node;
 numactl --cpunodebind=$node --membind=$node $entry
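The added numactl --hardware call only logs the topology; the actual node choice is the arithmetic on the following line of the script. A sketch of that selection, runnable on any Linux host with numactl installed:

    # Extract the node count from the "available: N nodes (...)" line, pick one at random.
    node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ))
    echo "Will bind to NUMA node $node"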
@@ -6,7 +6,7 @@ ARG apt_archive="http://archive.ubuntu.com"
 RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
 
 RUN apt-get update --yes \
-    && env DEBIAN_FRONTEND=noninteractive apt-get install wget git default-jdk maven python3 --yes --no-install-recommends \
+    && env DEBIAN_FRONTEND=noninteractive apt-get install wget git python3 default-jdk maven --yes --no-install-recommends \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
 
@@ -35,7 +35,6 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \
 
 
 ENV TZ=Europe/Amsterdam
-ENV MAX_RUN_TIME=9000
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 
 ARG sqllogic_test_repo="https://github.com/gregrahn/sqllogictest.git"
@@ -94,7 +94,7 @@ function run_tests()
 
 export -f run_tests
 
-timeout "${MAX_RUN_TIME:-9000}" bash -c run_tests || echo "timeout reached" >&2
+run_tests
 
 #/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
 
@@ -22,7 +22,6 @@ ARG sqltest_repo="https://github.com/elliotchance/sqltest/"
 RUN git clone ${sqltest_repo}
 
 ENV TZ=UTC
-ENV MAX_RUN_TIME=900
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 
 COPY run.sh /
@@ -4,9 +4,6 @@
 source /setup_export_logs.sh
 set -e -x
 
-MAX_RUN_TIME=${MAX_RUN_TIME:-3600}
-MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 3600 : MAX_RUN_TIME))
-
 # Choose random timezone for this test run
 TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
 echo "Choosen random timezone $TZ"
@@ -118,14 +115,11 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
     clickhouse-client --query "CREATE TABLE test.hits AS datasets.hits_v1"
     clickhouse-client --query "CREATE TABLE test.visits AS datasets.visits_v1"
 
-    clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1"
-    clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1"
+    clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1"
+    clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1"
 
     clickhouse-client --query "DROP TABLE datasets.hits_v1"
     clickhouse-client --query "DROP TABLE datasets.visits_v1"
-
-    MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
-    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
 else
     clickhouse-client --query "CREATE DATABASE test"
     clickhouse-client --query "SHOW TABLES FROM test"
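For readers puzzled by the removed MAX_RUN_TIME lines: bash arithmetic expansion supports a C-style ternary, which is how the clamp and the zero-means-unlimited fallback were written. A minimal sketch of the removed logic:

    MAX_RUN_TIME=0
    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000))   # treat 0 as unlimited, fall back to 2.5 hours
    echo "$MAX_RUN_TIME"   # 9000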
@@ -191,8 +185,8 @@ else
         ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
         SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
 
-        clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
-        clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+        clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
+        clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
         clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
         clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"
     else
@@ -200,7 +194,8 @@ else
         clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
     fi
     clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
-    clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0"
+    # AWS S3 is very inefficient, so increase memory even further:
+    clickhouse-client --max_memory_usage 30G --max_memory_usage_for_user 30G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
 fi
 
 clickhouse-client --query "SHOW TABLES FROM test"
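As the rewritten INSERT above shows, per-query limits can be passed either as clickhouse-client flags (--max_memory_usage, --max_memory_usage_for_user) or inline through a SETTINGS clause, and the hunk combines both. The command below is quoted from the diff and assumes the test.hits_s3 and test.hits tables created earlier in the same script:

    clickhouse-client --max_memory_usage 30G --max_memory_usage_for_user 30G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"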
@@ -232,35 +227,32 @@ function run_tests()
 
     set +e
 
+    TEST_ARGS=(
+        -j 2
+        --testname
+        --shard
+        --zookeeper
+        --check-zookeeper-session
+        --no-stateless
+        --hung-check
+        --print-time
+        --capture-client-stacktrace
+        "${ADDITIONAL_OPTIONS[@]}"
+        "$SKIP_TESTS_OPTION"
+    )
     if [[ -n "$USE_PARALLEL_REPLICAS" ]] && [[ "$USE_PARALLEL_REPLICAS" -eq 1 ]]; then
-        clickhouse-test --client="clickhouse-client --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 \
-            --max_parallel_replicas=100 --cluster_for_parallel_replicas='parallel_replicas'" \
-            -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --no-parallel-replicas --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
-            "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
-    else
-        clickhouse-test -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
-            "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
+        TEST_ARGS+=(
+            --client="clickhouse-client --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 --max_parallel_replicas=100 --cluster_for_parallel_replicas='parallel_replicas'"
+            --no-parallel-replicas
+        )
     fi
+    clickhouse-test "${TEST_ARGS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
     set -e
 }
 
 export -f run_tests
 
-function timeout_with_logging() {
-    local exit_code=0
-
-    timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
-
-    if [[ "${exit_code}" -eq "124" ]]
-    then
-        echo "The command 'timeout ${*}' has been killed by timeout"
-    fi
-
-    return $exit_code
-}
-
-TIMEOUT=$((MAX_RUN_TIME - 700))
-timeout_with_logging "$TIMEOUT" bash -c run_tests ||:
+run_tests ||:
 
 echo "Files in current directory"
 ls -la ./
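The TEST_ARGS rewrite above collapses two near-duplicate clickhouse-test invocations into one array that is extended conditionally; expanding it as "${TEST_ARGS[@]}" (quoted) keeps arguments that contain spaces intact. A minimal sketch of the pattern, with illustrative values:

    TEST_ARGS=( -j 2 --print-time )
    if [[ "${USE_PARALLEL_REPLICAS:-0}" -eq 1 ]]; then
        TEST_ARGS+=( --client="clickhouse-client --max_parallel_replicas=100" )
    fi
    printf '%s\n' "${TEST_ARGS[@]}"   # one element per line, embedded spaces preserved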
@@ -65,12 +65,11 @@ ENV TZ=Europe/Amsterdam
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 
 ENV NUM_TRIES=1
-ENV MAX_RUN_TIME=0
 
 # Unrelated to vars in setup_minio.sh, but should be the same there
 # to have the same binaries for local running scenario
-ARG MINIO_SERVER_VERSION=2022-01-03T18-22-58Z
-ARG MINIO_CLIENT_VERSION=2022-01-05T23-52-51Z
+ARG MINIO_SERVER_VERSION=2024-08-03T04-33-23Z
+ARG MINIO_CLIENT_VERSION=2024-07-31T15-58-33Z
 ARG TARGETARCH
 
 # Download Minio-related binaries
@@ -5,44 +5,53 @@ source /utils.lib
 
 function attach_gdb_to_clickhouse()
 {
-    # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
-    # and clickhouse-server can do fork-exec, for example, to run some bridge.
-    # Do not set nostop noprint for all signals, because some it may cause gdb to hang,
-    # explicitly ignore non-fatal signals that are used by server.
-    # Number of SIGRTMIN can be determined only in runtime.
-    RTMIN=$(kill -l SIGRTMIN)
-    echo "
-set follow-fork-mode parent
-handle SIGHUP nostop noprint pass
-handle SIGINT nostop noprint pass
-handle SIGQUIT nostop noprint pass
-handle SIGPIPE nostop noprint pass
-handle SIGTERM nostop noprint pass
-handle SIGUSR1 nostop noprint pass
-handle SIGUSR2 nostop noprint pass
-handle SIGSEGV nostop pass
-handle SIG$RTMIN nostop noprint pass
-info signals
-continue
-backtrace full
-thread apply all backtrace full
-info registers
-disassemble /s
-up
-disassemble /s
-up
-disassemble /s
-p \"done\"
-detach
-quit
-" > script.gdb
+    IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)")
+    if [[ "$IS_ASAN" = "1" ]];
+    then
+        echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
+    else
+        # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
+        # and clickhouse-server can do fork-exec, for example, to run some bridge.
+        # Do not set nostop noprint for all signals, because some it may cause gdb to hang,
+        # explicitly ignore non-fatal signals that are used by server.
+        # Number of SIGRTMIN can be determined only in runtime.
+        RTMIN=$(kill -l SIGRTMIN)
+        # shellcheck disable=SC2016
+        echo "
+set follow-fork-mode parent
+handle SIGHUP nostop noprint pass
+handle SIGINT nostop noprint pass
+handle SIGQUIT nostop noprint pass
+handle SIGPIPE nostop noprint pass
+handle SIGTERM nostop noprint pass
+handle SIGUSR1 nostop noprint pass
+handle SIGUSR2 nostop noprint pass
+handle SIG$RTMIN nostop noprint pass
+info signals
+continue
+backtrace full
+info registers
+p "top 1 KiB of the stack:"
+p/x *(uint64_t[128]*)"'$sp'"
+maintenance info sections
+thread apply all backtrace full
+disassemble /s
+up
+disassemble /s
+up
+disassemble /s
+p \"done\"
+detach
+quit
+" > script.gdb
 
     # FIXME Hung check may work incorrectly because of attached gdb
     # We cannot attach another gdb to get stacktraces if some queries hung
     gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log &
     sleep 5
     # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
     run_with_retry 60 clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'"
+    fi
 }
 
 # vi: ft=bash
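One subtlety in the rewritten gdb script above: in the p/x line the double-quoted string is closed, a single-quoted '$sp' is spliced in so that bash passes the register name through for gdb to evaluate, and the string is then reopened; the added "# shellcheck disable=SC2016" marks that as intentional. A minimal sketch of the quoting trick:

    # Prints: p/x *(uint64_t[128]*)$sp  -- bash never expands $sp here.
    echo "p/x *(uint64_t[128]*)"'$sp'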