Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-25 00:52:02 +00:00)

Commit c23e3d8663: Merge branch 'master' into mongodb_refactoring
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -27,6 +27,8 @@ Checks: [
     '-bugprone-not-null-terminated-result',
     '-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
     '-bugprone-unchecked-optional-access',
+    '-bugprone-crtp-constructor-accessibility',
+    '-bugprone-suspicious-stringview-data-usage',
 
     '-cert-dcl16-c',
     '-cert-dcl37-c',
@@ -36,6 +38,7 @@ Checks: [
     '-cert-msc51-cpp',
     '-cert-oop54-cpp',
     '-cert-oop57-cpp',
+    '-cert-err33-c', # Misreports on clang-19: it warns about all functions containing 'remove' in the name, not only about the standard library.
 
     '-clang-analyzer-optin.performance.Padding',
 
@@ -99,6 +102,7 @@ Checks: [
     '-modernize-use-emplace',
     '-modernize-use-nodiscard',
     '-modernize-use-trailing-return-type',
+    '-modernize-use-designated-initializers',
 
     '-performance-enum-size',
     '-performance-inefficient-string-concatenation',
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -13,3 +13,6 @@
 # dbms/ → src/
 # (though it is unlikely that you will see it in blame)
 06446b4f08a142d6f1bc30664c47ded88ab51782
+
+# Applied Black formatter for Python code
+e6f5a3f98b21ba99cf274a9833797889e020a2b3
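An editorial aside, not part of the commit: git only consults an ignore-revs file when told to, either per invocation or once via configuration (the source path below is illustrative):

    git blame --ignore-revs-file=.git-blame-ignore-revs src/SomeFile.cpp
    git config blame.ignoreRevsFile .git-blame-ignore-revs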
--- a/.gitattributes
+++ b/.gitattributes
@@ -2,3 +2,4 @@ contrib/* linguist-vendored
 *.h linguist-language=C++
 tests/queries/0_stateless/data_json/* binary
 tests/queries/0_stateless/*.reference -crlf
+src/Core/SettingsChangesHistory.cpp merge=union
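A note on the added attribute (editorial, not from the commit): `merge=union` selects git's built-in line-union merge driver, so concurrent edits to src/Core/SettingsChangesHistory.cpp keep the lines from both sides instead of producing conflict markers, which fits a file that many PRs append to. A toy demonstration in a scratch repository (all names illustrative):

    git init /tmp/union-demo && cd /tmp/union-demo
    echo 'list.txt merge=union' > .gitattributes
    printf 'base\n' > list.txt && git add -A && git commit -m base
    git checkout -b left && printf 'base\nfrom-left\n' > list.txt && git commit -am left
    git checkout - && git checkout -b right && printf 'base\nfrom-right\n' > list.txt && git commit -am right
    git merge left   # no conflict: list.txt now contains both appended lines

Union merges keep both sides but give no ordering guarantee, so the result still relies on review and CI rather than blind trust.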
--- a/.github/actionlint.yml
+++ b/.github/actionlint.yml
@@ -7,3 +7,4 @@ self-hosted-runner:
     - stress-tester
     - style-checker
     - style-checker-aarch64
+    - release-maker
--- /dev/null
+++ b/.github/actions/release/action.yml (new file)
@@ -0,0 +1,168 @@
+name: Release
+
+description: Makes patch releases and creates new release branch
+
+inputs:
+  ref:
+    description: 'Git reference (branch or commit sha) from which to create the release'
+    required: true
+    type: string
+  type:
+    description: 'The type of release: "new" for a new release or "patch" for a patch release'
+    required: true
+    type: choice
+    options:
+      - patch
+      - new
+  dry-run:
+    description: 'Dry run'
+    required: false
+    default: true
+    type: boolean
+  token:
+    required: true
+    type: string
+
+runs:
+  using: "composite"
+  steps:
+    - name: Prepare Release Info
+      shell: bash
+      run: |
+        python3 ./tests/ci/create_release.py --prepare-release-info \
+            --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
+            ${{ inputs.dry-run && '--dry-run' || '' }}
+        echo "::group::Release Info"
+        python3 -m json.tool /tmp/release_info.json
+        echo "::endgroup::"
+        release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
+        commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
+        echo "Release Tag: $release_tag"
+        echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
+        echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
+    - name: Download All Release Artifacts
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Push Git Tag for the Release
+      shell: bash
+      run: |
+        python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Push New Release Branch
+      if: ${{ inputs.type == 'new' }}
+      shell: bash
+      run: |
+        python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Bump CH Version and Update Contributors' List
+      shell: bash
+      run: |
+        python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Bump Docker versions, Changelog, Security
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        git checkout master
+        python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
+        echo "List versions"
+        ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
+        echo "Update docker version"
+        ./utils/list-versions/update-docker-version.sh
+        echo "Generate ChangeLog"
+        export CI=1
+        docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
+            --volume=".:/ClickHouse" clickhouse/style-test \
+            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
+            --gh-user-or-token=${{ inputs.token }} --jobs=5 \
+            --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
+        git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
+        echo "Generate Security"
+        python3 ./utils/security-generator/generate_security.py > SECURITY.md
+        git diff HEAD
+    - name: Create ChangeLog PR
+      if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
+      uses: peter-evans/create-pull-request@v6
+      with:
+        author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+        token: ${{ inputs.token }}
+        committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+        commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
+        branch: auto/${{ env.RELEASE_TAG }}
+        assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
+        delete-branch: true
+        title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
+        labels: do not test
+        body: |
+          Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
+          ### Changelog category (leave one):
+          - Not for changelog (changelog entry is not required)
+    - name: Complete previous steps and Restore git state
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        python3 ./tests/ci/create_release.py --set-progress-completed
+        git reset --hard HEAD
+        git checkout "$GITHUB_REF_NAME"
+    - name: Create GH Release
+      shell: bash
+      if: ${{ inputs.type == 'patch' }}
+      run: |
+        python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Export TGZ Packages
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Test TGZ Packages
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Export RPM Packages
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Test RPM Packages
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Export Debian Packages
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Test Debian Packages
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }}
+    - name: Docker clickhouse/clickhouse-server building
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        cd "./tests/ci"
+        python3 ./create_release.py --set-progress-started --progress "docker server release"
+        export CHECK_NAME="Docker server image"
+        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+        python3 ./create_release.py --set-progress-completed
+    - name: Docker clickhouse/clickhouse-keeper building
+      if: ${{ inputs.type == 'patch' }}
+      shell: bash
+      run: |
+        cd "./tests/ci"
+        python3 ./create_release.py --set-progress-started --progress "docker keeper release"
+        export CHECK_NAME="Docker keeper image"
+        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
+        python3 ./create_release.py --set-progress-completed
+    - name: Set current Release progress to Completed with OK
+      shell: bash
+      run: |
+        python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
+        python3 ./tests/ci/create_release.py --set-progress-completed
+    - name: Post Slack Message
+      if: ${{ !cancelled() }}
+      shell: bash
+      run: |
+        python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }}
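One construct above recurs in nearly every step and deserves a gloss (editorial note, not from the commit): GitHub Actions expressions have no ternary operator, so `${{ inputs.dry-run && '--dry-run' || '' }}` emulates one, yielding '--dry-run' when the input is true and an empty string otherwise. The idiom is safe here only because the middle operand is a non-empty (truthy) string. A bash analogue for intuition:

    dry_run=true
    flag=$([ "$dry_run" = true ] && echo '--dry-run' || echo '')
    python3 ./tests/ci/create_release.py --push-release-tag $flag   # illustrative invocation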
--- a/.github/workflows/auto_release.yml
+++ b/.github/workflows/auto_release.yml
@@ -1,44 +1,110 @@
 name: AutoRelease
 
 env:
-  # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
+  DRY_RUN: true
 
 concurrency:
-  group: auto-release
+  group: release
 on:  # yamllint disable-line rule:truthy
-  # schedule:
-  #   - cron: '0 10-16 * * 1-5'
+  # Workflow uses a test bucket for packages and dry run mode (no real releases)
+  schedule:
+    - cron: '0 9 * * *'
+    - cron: '0 15 * * *'
   workflow_dispatch:
+    inputs:
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: true
+        type: boolean
 
 jobs:
-  CherryPick:
-    runs-on: [self-hosted, style-checker-aarch64]
+  AutoRelease:
+    runs-on: [self-hosted, release-maker]
     steps:
+      - name: DebugInfo
+        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Set envs
-        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/cherry_pick
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
-          REPO_OWNER=ClickHouse
-          REPO_NAME=ClickHouse
-          REPO_TEAM=core
          EOF
+      - name: Set DRY_RUN for schedule
+        if: ${{ github.event_name == 'schedule' }}
+        run: echo "DRY_RUN=true" >> "$GITHUB_ENV"
+      - name: Set DRY_RUN for dispatch
+        if: ${{ github.event_name == 'workflow_dispatch' }}
+        run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV"
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
-          clear-repository: true
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
-      - name: Auto-release
+      - name: Auto Release Prepare
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 auto_release.py --release-after-days=3
-      - name: Cleanup
-        if: always()
+          python3 auto_release.py --prepare
+          echo "::group::Auto Release Info"
+          python3 -m json.tool /tmp/autorelease_info.json
+          echo "::endgroup::"
+          {
+            echo 'AUTO_RELEASE_PARAMS<<EOF'
+            cat /tmp/autorelease_info.json
+            echo 'EOF'
+          } >> "$GITHUB_ENV"
+      - name: Post Release Branch statuses
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 auto_release.py --post-status
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Post Slack Message
+        if: ${{ !cancelled() }}
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
+      - name: Clean up
         run: |
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
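The `AUTO_RELEASE_PARAMS` block above uses the runner's documented multiline-value syntax for `$GITHUB_ENV`. A generic sketch of the idiom (variable and file names illustrative):

    {
      echo 'MY_PARAMS<<EOF'      # heredoc-style delimiter recognized by the runner
      cat /tmp/params.json       # the value may span any number of lines
      echo 'EOF'
    } >> "$GITHUB_ENV"
    # later steps read it back as ${{ env.MY_PARAMS }} or fromJson(env.MY_PARAMS)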
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -36,10 +36,6 @@ jobs:
           cd "$GITHUB_WORKSPACE/tests/ci"
           echo "Testing the main ci directory"
           python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
       - name: PrepareRunConfig
         id: runconfig
         run: |
@@ -62,7 +58,7 @@ jobs:
   BuildDockers:
     needs: [RunConfig]
     if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
     with:
       data: ${{ needs.RunConfig.outputs.data }}
   CompatibilityCheckX86:
@@ -167,10 +163,16 @@ jobs:
     steps:
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
+      - name: Download reports
+        run: |
+          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --pre --job-name Builds
       - name: Builds report
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 ./build_report_check.py --reports package_release package_aarch64 package_asan package_tsan package_debug binary_darwin binary_darwin_aarch64
+      - name: Set status
+        run: |
+          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --post --job-name Builds
 ############################################################################################
 #################################### INSTALL PACKAGES ######################################
 ############################################################################################
@@ -239,8 +241,9 @@ jobs:
       runner_type: stress-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
     needs:
+      - RunConfig
       - Builds_Report
       - FunctionalStatelessTestAsan
       - FunctionalStatefulTestDebug
@@ -255,6 +258,7 @@ jobs:
         with:
           clear-repository: true
       - name: Finish label
+        if: ${{ !failure() }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          # update mergeable check
@@ -262,3 +266,10 @@ jobs:
          # update overall ci report
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          python3 merge_pr.py
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
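The new `Check Workflow results` step, which reappears in most workflows below, hands the whole `needs` context to ci_buddy.py through a file. The quoted heredoc delimiter is the load-bearing detail (editorial gloss):

    cat > "$WORKFLOW_RESULT_FILE" << 'EOF'   # 'EOF' quoted: bash expands nothing inside;
    ${{ toJson(needs) }}                     # the JSON was already substituted by Actions
    EOF                                      # before the script ever ran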
--- /dev/null
+++ b/.github/workflows/create_release.yml (new file)
@@ -0,0 +1,44 @@
+name: CreateRelease
+
+concurrency:
+  group: release
+'on':
+  workflow_dispatch:
+    inputs:
+      ref:
+        description: 'Git reference (branch or commit sha) from which to create the release'
+        required: true
+        type: string
+      type:
+        description: 'The type of release: "new" for a new release or "patch" for a patch release'
+        required: true
+        type: choice
+        options:
+          - patch
+          - new
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: true
+        type: boolean
+
+jobs:
+  CreateRelease:
+    env:
+      GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
+    runs-on: [self-hosted, release-maker]
+    steps:
+      - name: DebugInfo
+        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+          fetch-depth: 0
+      - name: Call Release Action
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ inputs.ref }}
+          type: ${{ inputs.type }}
+          dry-run: ${{ inputs.dry-run }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
--- a/.github/workflows/jepsen.yml
+++ b/.github/workflows/jepsen.yml
@@ -9,19 +9,64 @@ on:  # yamllint disable-line rule:truthy
     - cron: '0 */6 * * *'
   workflow_dispatch:
 jobs:
+  RunConfig:
+    runs-on: [self-hosted, style-checker-aarch64]
+    outputs:
+      data: ${{ steps.runconfig.outputs.CI_DATA }}
+    steps:
+      - name: DebugInfo
+        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true  # to ensure correct digests
+          fetch-depth: 0  # to get version
+          filter: tree:0
+      - name: PrepareRunConfig
+        id: runconfig
+        run: |
+          echo "::group::configure CI run"
+          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow "$GITHUB_WORKFLOW" --outfile ${{ runner.temp }}/ci_run_data.json
+          echo "::endgroup::"
+
+          echo "::group::CI run configure results"
+          python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+          echo "::endgroup::"
+          {
+            echo 'CI_DATA<<EOF'
+            cat ${{ runner.temp }}/ci_run_data.json
+            echo 'EOF'
+          } >> "$GITHUB_OUTPUT"
   KeeperJepsenRelease:
-    uses: ./.github/workflows/reusable_simple_job.yml
+    needs: [RunConfig]
+    uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Jepsen keeper check
-      runner_type: style-checker
-      report_required: true
+      test_name: ClickHouse Keeper Jepsen
+      runner_type: style-checker-aarch64
+      data: ${{ needs.RunConfig.outputs.data }}
       run_command: |
         python3 jepsen_check.py keeper
-  # ServerJepsenRelease:
-  #   uses: ./.github/workflows/reusable_simple_job.yml
-  #   with:
-  #     test_name: Jepsen server check
-  #     runner_type: style-checker
-  #     run_command: |
-  #       cd "$REPO_COPY/tests/ci"
-  #       python3 jepsen_check.py server
+  ServerJepsenRelease:
+    if: false  # skip for server
+    needs: [RunConfig]
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: ClickHouse Server Jepsen
+      runner_type: style-checker-aarch64
+      data: ${{ needs.RunConfig.outputs.data }}
+      run_command: |
+        python3 jepsen_check.py server
+  CheckWorkflow:
+    if: ${{ !cancelled() }}
+    needs: [RunConfig, ServerJepsenRelease, KeeperJepsenRelease]
+    runs-on: [self-hosted, style-checker-aarch64]
+    steps:
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -33,10 +33,6 @@ jobs:
       #       cd "$GITHUB_WORKSPACE/tests/ci"
       #       echo "Testing the main ci directory"
       #       python3 -m unittest discover -s . -p 'test_*.py'
-      #       for dir in *_lambda/; do
-      #         echo "Testing $dir"
-      #         python3 -m unittest discover -s "$dir" -p 'test_*.py'
-      #       done
       - name: PrepareRunConfig
         id: runconfig
         run: |
@@ -58,7 +54,7 @@ jobs:
   # BuildDockers:
   #   needs: [RunConfig]
   #   if: ${{ !failure() && !cancelled() }}
-  #   uses: ./.github/workflows/reusable_docker.yml
+  #   uses: ./.github/workflows/docker_test_images.yml
   #   with:
   #     data: ${{ needs.RunConfig.outputs.data }}
   # StyleCheck:
@@ -125,34 +121,6 @@ jobs:
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
-
-  MarkReleaseReady:
-    if: ${{ !failure() && !cancelled() }}
-    needs: [RunConfig, Builds_1, Builds_2]
-    runs-on: [self-hosted, style-checker-aarch64]
-    steps:
-      - name: Debug
-        run: |
-          echo need with different filters
-          cat << 'EOF'
-          ${{ toJSON(needs) }}
-          ${{ toJSON(needs.*.result) }}
-          no failures ${{ !contains(needs.*.result, 'failure') }}
-          no skips ${{ !contains(needs.*.result, 'skipped') }}
-          no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-          EOF
-      - name: Not ready
-        # fail the job to be able to restart it
-        if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
-        run: exit 1
-      - name: Check out repository code
-        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-        uses: ClickHouse/checkout@v1
-      - name: Mark Commit Release Ready
-        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 mark_release_ready.py
-
   FinishCheck:
     if: ${{ !cancelled() }}
     needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
@@ -164,3 +132,10 @@ jobs:
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
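Another recurring expression in these workflows (editorial gloss): `needs.*.result` is an Actions object filter that collects the result string of every upstream job, and the `&&`/`||` pair folds it into one word for the reporting scripts:

    # ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
    #   -> 'failure' if any needed job failed, 'success' otherwise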
--- a/.github/workflows/merge_queue.yml
+++ b/.github/workflows/merge_queue.yml
@@ -30,10 +30,6 @@ jobs:
           cd "$GITHUB_WORKSPACE/tests/ci"
           echo "Testing the main ci directory"
           python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
       - name: PrepareRunConfig
         id: runconfig
         run: |
@@ -51,7 +47,7 @@ jobs:
   BuildDockers:
     needs: [RunConfig]
     if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
     with:
       data: ${{ needs.RunConfig.outputs.data }}
   StyleCheck:
@@ -97,7 +93,7 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}
 
   CheckReadyForMerge:
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
+    if: ${{ !cancelled() }}
     # Test_2 or Test_3 must not have jobs required for Mergeable check
     needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
     runs-on: [self-hosted, style-checker-aarch64]
@@ -105,6 +101,14 @@ jobs:
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
       - name: Check and set merge status
+        if: ${{ needs.StyleCheck.result == 'success' }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -40,7 +40,21 @@ jobs:
           } >> "$GITHUB_OUTPUT"
   BuildDockers:
     needs: [RunConfig]
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
     with:
       data: "${{ needs.RunConfig.outputs.data }}"
       set_latest: true
+  CheckWorkflow:
+    if: ${{ !cancelled() }}
+    needs: [RunConfig, BuildDockers]
+    runs-on: [self-hosted, style-checker-aarch64]
+    steps:
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -48,10 +48,6 @@ jobs:
           cd "$GITHUB_WORKSPACE/tests/ci"
           echo "Testing the main ci directory"
           python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
       - name: PrepareRunConfig
         id: runconfig
         run: |
@@ -72,7 +68,7 @@ jobs:
   BuildDockers:
     needs: [RunConfig]
     if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
     with:
       data: ${{ needs.RunConfig.outputs.data }}
   StyleCheck:
@@ -155,9 +151,10 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}
 
   CheckReadyForMerge:
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
-    # Test_2 or Test_3 must not have jobs required for Mergeable check
-    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1]
+    if: ${{ !cancelled() }}
+    # Test_2 or Test_3 do not have the jobs required for Mergeable check,
+    # however, set them as "needs" to get all checks results before the automatic merge occurs.
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -165,14 +162,22 @@ jobs:
         with:
           filter: tree:0
       - name: Check and set merge status
+        if: ${{ needs.StyleCheck.result == 'success' }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
 
 ################################# Stage Final #################################
 #
   FinishCheck:
-    if: ${{ !cancelled() }}
+    if: ${{ !failure() && !cancelled() }}
     needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -33,10 +33,6 @@ jobs:
           cd "$GITHUB_WORKSPACE/tests/ci"
           echo "Testing the main ci directory"
           python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
       - name: PrepareRunConfig
         id: runconfig
         run: |
@@ -57,7 +53,7 @@ jobs:
   BuildDockers:
     needs: [RunConfig]
     if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
     with:
       data: ${{ needs.RunConfig.outputs.data }}
   CompatibilityCheckX86:
@@ -184,10 +180,16 @@ jobs:
     steps:
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
+      - name: Download reports
+        run: |
+          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --pre --job-name Builds
       - name: Builds report
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 ./build_report_check.py --reports package_release package_aarch64 package_asan package_msan package_ubsan package_tsan package_debug binary_darwin binary_darwin_aarch64
+      - name: Set status
+        run: |
+          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --post --job-name Builds
   MarkReleaseReady:
     if: ${{ !failure() && !cancelled() }}
     needs:
@@ -439,8 +441,9 @@ jobs:
       runner_type: stress-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
     needs:
+      - RunConfig
       - DockerServerImage
       - DockerKeeperImage
       - Builds_Report
@@ -476,9 +479,18 @@ jobs:
         with:
           clear-repository: true
       - name: Finish label
+        if: ${{ !failure() }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           # update mergeable check
           python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
           # update overall ci report
           python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
--- a/.github/workflows/reusable_test.yml
+++ b/.github/workflows/reusable_test.yml
@@ -102,6 +102,8 @@ jobs:
             --job-name '${{inputs.test_name}}' \
             --run \
             --run-command '''${{inputs.run_command}}'''
+          # shellcheck disable=SC2319
+          echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV"
       - name: Post run
         if: ${{ !cancelled() }}
         run: |
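A gloss on the two added lines (editorial): shellcheck's SC2319 fires when `$?` might not refer to the command the author means, so the directive records that the status is captured deliberately, on the very next line, before anything can overwrite it:

    some_long_test_command                    # illustrative
    echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV"  # must immediately follow the command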
--- a/.yamllint
+++ b/.yamllint
@@ -13,5 +13,10 @@ rules:
     level: warning
   comments:
     min-spaces-from-content: 1
-  document-start:
-    present: false
+  document-start: disable
+  colons: disable
+  indentation: disable
+  line-length: disable
+  trailing-spaces: disable
+  truthy: disable
+  new-line-at-end-of-file: disable
325
CHANGELOG.md
325
CHANGELOG.md
@ -1,5 +1,6 @@
|
|||||||
### Table of Contents
|
### Table of Contents
|
||||||
**[ClickHouse release v24.6, 2024-06-27](#246)**<br/>
|
**[ClickHouse release v24.7, 2024-07-30](#247)**<br/>
|
||||||
|
**[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
|
||||||
**[ClickHouse release v24.5, 2024-05-30](#245)**<br/>
|
**[ClickHouse release v24.5, 2024-05-30](#245)**<br/>
|
||||||
**[ClickHouse release v24.4, 2024-04-30](#244)**<br/>
|
**[ClickHouse release v24.4, 2024-04-30](#244)**<br/>
|
||||||
**[ClickHouse release v24.3 LTS, 2024-03-26](#243)**<br/>
|
**[ClickHouse release v24.3 LTS, 2024-03-26](#243)**<br/>
|
||||||
@ -9,107 +10,277 @@
|
|||||||
|
|
||||||
# 2024 Changelog
|
# 2024 Changelog
|
||||||
|
|
||||||
### <a id="246"></a> ClickHouse release 24.6, 2024-06-27
|
### <a id="247"></a> ClickHouse release 24.7, 2024-07-30
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
* Forbid `CRATE MATERIALIZED VIEW ... ENGINE Replicated*MergeTree POPULATE AS SELECT ...` with Replicated databases. [#63963](https://github.com/ClickHouse/ClickHouse/pull/63963) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* `clickhouse-keeper-client` will only accept paths in string literals, such as `ls '/hello/world'`, not bare strings such as `ls /hello/world`. [#65494](https://github.com/ClickHouse/ClickHouse/pull/65494) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
* Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)).
|
||||||
|
* Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||||
|
* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)).
|
||||||
|
* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)).
|
||||||
|
* Support accept_invalid_certificate in client's config in order to allow for client to connect over secure TCP to a server running with self-signed certificate - can be used as a shorthand for corresponding `openSSL` client settings `verificationMode=none` + `invalidCertificateHandler.name=AcceptCertificateHandler`. [#65238](https://github.com/ClickHouse/ClickHouse/pull/65238) ([peacewalker122](https://github.com/peacewalker122)).
|
||||||
|
* Add system.error_log which contains history of error values from table system.errors, periodically flushed to disk. [#65381](https://github.com/ClickHouse/ClickHouse/pull/65381) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||||
|
* Add aggregate function `groupConcat`. About the same as `arrayStringConcat( groupArray(column), ',')` Can receive 2 parameters: a string delimiter and the number of elements to be processed. [#65451](https://github.com/ClickHouse/ClickHouse/pull/65451) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Add AzureQueue storage. [#65458](https://github.com/ClickHouse/ClickHouse/pull/65458) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Add a new setting to disable/enable writing page index into parquet files. [#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)).
|
||||||
|
* Introduce `logger.console_log_level` server config to control the log level to the console (if enabled). [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Automatically append a wildcard `*` to the end of a directory path with table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)).
|
||||||
|
* Add `--memory-usage` option to client in non interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Make an interactive client for clickhouse-disks, add local disk from the local directory. [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)).
|
||||||
|
* When lightweight delete happens on a table with projection(s), users have choices either throw an exception (by default) or drop the projection [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)).
|
||||||
|
|
||||||
|
#### Experimental Feature
|
||||||
|
* Change binary serialization of Variant data type: add `compact` mode to avoid writing the same discriminator multiple times for granules with single variant or with only NULL values. Add MergeTree setting `use_compact_variant_discriminators_serialization` that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Support rocksdb as backend storage of keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
|
* Refactor JSONExtract functions, support more types including experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Support null map subcolumn for Variant and Dynamic subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix reading dynamic subcolumns from altered Memory table. Previously if `max_types` parameter of a Dynamic type was changed in Memory table via alter, further subcolumns reading can return wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
* Enable `optimize_functions_to_subcolumns` by default. [#58661](https://github.com/ClickHouse/ClickHouse/pull/58661) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Replace int to string algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Sizes of hash tables created by join (`parallel_hash` algorithm) is collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using of buffering. It is controlled by setting `read_in_order_use_buffering` (enabled by default) and can increase memory usage of query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Improve performance of loading `plain_rewritable` metadata. [#65634](https://github.com/ClickHouse/ClickHouse/pull/65634) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Attaching tables on read-only disks will use fewer resources by not loading outdated parts. [#65635](https://github.com/ClickHouse/ClickHouse/pull/65635) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Support minmax hyperrectangle for Set indices. [#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)).
|
||||||
|
* Unload primary index of outdated parts to reduce total memory usage. [#65852](https://github.com/ClickHouse/ClickHouse/pull/65852) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Functions `replaceRegexpAll` and `replaceRegexpOne` are now significantly faster if the pattern is trivial, i.e. contains no metacharacters, pattern classes, flags, grouping characters etc. (Thanks to Taiyang Li). [#66185](https://github.com/ClickHouse/ClickHouse/pull/66185) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* s3 requests: Reduce retry time for queries, increase retries count for backups. 8.5 minutes and 100 retires for queries, 1.2 hours and 1000 retries for backup restore. [#65232](https://github.com/ClickHouse/ClickHouse/pull/65232) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Support query plan LIMIT optimization. Support LIMIT pushdown for PostgreSQL storage and table function. [#65454](https://github.com/ClickHouse/ClickHouse/pull/65454) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* DatabaseCatalog drops tables faster by using up to database_catalog_drop_table_concurrency threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Allow matching column names in a case insensitive manner when reading json files (`input_format_json_case_insensitive_column_matching`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)).
|
||||||
|
* Support reading partitioned data DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* In composable protocols TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)).
|
||||||
|
* Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)).
|
||||||
|
* `StorageS3Queue` related fixes and improvements. Deduce a default value of `s3queue_processing_threads_num` according to the number of physical cpu cores on the server (instead of the previous default value as 1). Set default value of `s3queue_loading_retries` to 10. Fix possible vague "Uncaught exception" in exception column of `system.s3queue`. Do not increment retry count on `MEMORY_LIMIT_EXCEEDED` exception. Move files commit to a stage after insertion into table fully finished to avoid files being commited while not inserted. Add settings `s3queue_max_processed_files_before_commit`, `s3queue_max_processed_rows_before_commit`, `s3queue_max_processed_bytes_before_commit`, `s3queue_max_processing_time_sec_before_commit`, to better control commit and flush time. [#65046](https://github.com/ClickHouse/ClickHouse/pull/65046) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support aliases in the parametrized view function (only with the new analyzer). [#65190](https://github.com/ClickHouse/ClickHouse/pull/65190) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Mask the account key in logs in azureBlobStorage. [#65273](https://github.com/ClickHouse/ClickHouse/pull/65273) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Partition pruning for `IN` predicates when the filter expression is a part of the `PARTITION BY` expression. [#65335](https://github.com/ClickHouse/ClickHouse/pull/65335) ([Eduard Karacharov](https://github.com/korowa)).
* Add system tables with main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)).
* `arrayMin`/`arrayMax` are now applicable to all comparable data types. [#65455](https://github.com/ClickHouse/ClickHouse/pull/65455) ([pn](https://github.com/chloro-pn)).
* Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)).
* Do not create format settings for each row when serializing chunks for insertion into an EmbeddedRocksDB table. [#65474](https://github.com/ClickHouse/ClickHouse/pull/65474) ([Duc Canh Le](https://github.com/canhld94)).
* Reduce the `clickhouse-local` prompt to just `:)`. `getFQDNOrHostName()` takes too long on macOS, and we don't want a hostname in the prompt for `clickhouse-local` anyway. [#65510](https://github.com/ClickHouse/ClickHouse/pull/65510) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Avoid printing a message from jemalloc about per-CPU arenas on low-end virtual machines. [#65532](https://github.com/ClickHouse/ClickHouse/pull/65532) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable filesystem cache background download by default. It will be enabled back when we fix the issue with a possible "Memory limit exceeded" error: memory deallocation is done outside of the query context (while the buffer is allocated inside the query context) if background download threads are used. We also need to add a separate setting to define the max size to download for background workers (currently it is limited by `max_file_segment_size`, which might be too big). [#65534](https://github.com/ClickHouse/ClickHouse/pull/65534) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a new config option, `<config_reload_interval_ms>`, which allows specifying how often ClickHouse reloads its config. [#65545](https://github.com/ClickHouse/ClickHouse/pull/65545) ([alesapin](https://github.com/alesapin)).
* Implement binary encoding for ClickHouse data types and add its specification to the docs. Use it in `Dynamic` binary serialization, and allow using it in the RowBinaryWithNamesAndTypes and Native formats under settings. [#65546](https://github.com/ClickHouse/ClickHouse/pull/65546) ([Kruglov Pavel](https://github.com/Avogar)).
* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Server settings `compiled_expression_cache_size` and `compiled_expression_cache_elements_size` are now shown in `system.server_settings`. [#65584](https://github.com/ClickHouse/ClickHouse/pull/65584) ([Robert Schulze](https://github.com/rschu1ze)).
* Add support for user identification based on the x509 SubjectAltName extension. [#65626](https://github.com/ClickHouse/ClickHouse/pull/65626) ([Anton Kozlov](https://github.com/tonickkozlov)).
* `clickhouse-local` will respect the `max_server_memory_usage` and `max_server_memory_usage_to_ram_ratio` from the configuration file. It will also set the max memory usage to 90% of the system memory by default, like `clickhouse-server` does. [#65697](https://github.com/ClickHouse/ClickHouse/pull/65697) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a script to back up your files to ClickHouse. [#65699](https://github.com/ClickHouse/ClickHouse/pull/65699) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The PostgreSQL source now supports query cancellation. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)).
* Make `allow_experimental_analyzer` be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed-version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Respect the cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
* Allow using the `concat` function with empty arguments: `SELECT concat()`. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([李扬](https://github.com/taiyang-li)).
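For instance:
```sql
SELECT concat();     -- returns an empty string
SELECT concat('a');  -- single-argument calls are also allowed
```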
* Allow controlling named collections in clickhouse-local. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve Azure profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)).
* Support reading ORC files using the writer's time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)).
* Add settings to control the connection to PostgreSQL. Setting `postgresql_connection_attempt_timeout` specifies the value passed to the `connect_timeout` parameter of the connection URL. Setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL endpoint. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)).
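A hedged sketch of tuning these settings for a session before querying a PostgreSQL table function (host, database, and credentials are placeholders):
```sql
SET postgresql_connection_attempt_timeout = 2;  -- seconds passed to connect_timeout
SET postgresql_connection_pool_retries = 5;     -- connection attempts before giving up
SELECT count() FROM postgresql('pg-host:5432', 'db', 'table', 'user', 'password');
```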
* Reduce the inaccuracy of `input_wait_elapsed_us`/`output_wait_elapsed_us`/`elapsed_us`. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)).
* Improve FilesystemCache ProfileEvents. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)).
* Add settings to ignore the ON CLUSTER clause in queries for named collection management with replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Function `generateSnowflakeID` now allows specifying a machine ID as a parameter to prevent collisions in large clusters. [#66374](https://github.com/ClickHouse/ClickHouse/pull/66374) ([ZAWA_ll](https://github.com/Zawa-ll)).
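A sketch of the machine-ID parameter; the argument order (an arbitrary expression first, then the machine ID) is an assumption based on the entry above:
```sql
-- An explicit machine ID keeps the machine-ID bits stable and distinct per replica
SELECT generateSnowflakeID('unique-expr', 42) AS id;
```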
* Disable suspending on Ctrl+Z in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add an option for validating the primary key type in dictionaries. Without this option, any column type for simple layouts is implicitly converted to UInt64. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously, such cyclic dependencies could lead to a deadlock during server startup. Also fix some bugs in dependencies creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash in `maxIntersections`. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
* Fix the VALID UNTIL clause in the user definition resetting after a restart. [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix the remaining time shown by `SHOW MERGES`. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* `Query was cancelled` might have been printed twice in clickhouse-client. This behaviour is fixed. [#66005](https://github.com/ClickHouse/ClickHouse/pull/66005) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fixed a crash when using MaterializedMySQL with a TABLE OVERRIDE that maps a MySQL NULL field into a ClickHouse not-NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)).
* Fix a logical error when a PREWHERE expression reads no columns and the table has no adaptive index granularity (a very old table). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)).
* Fix a bug with the cancellation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)).
* Fix filling parts columns from metadata (when `columns.txt` does not exist). [#64757](https://github.com/ClickHouse/ClickHouse/pull/64757) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
* Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
* Eliminate injective functions in arguments of `uniq*` functions recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
* Fix an unexpected projection name in queries with CTEs. [#65267](https://github.com/ClickHouse/ClickHouse/pull/65267) ([wudidapaopao](https://github.com/wudidapaopao)).
* Require the `dictGet` privilege when accessing dictionaries via direct query or the `Dictionary` table engine. [#65359](https://github.com/ClickHouse/ClickHouse/pull/65359) ([Joe Lynch](https://github.com/joelynch)).
* Fix user-specific S3 auth with incremental backups. [#65481](https://github.com/ClickHouse/ClickHouse/pull/65481) ([Antonio Andelic](https://github.com/antonio2368)).
* Disable the `non-intersecting-parts` optimization for queries with `FINAL` when the `read-in-order` optimization is enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix getting the exception `Index out of bound for blob metadata` in case all files from a list batch were filtered out. [#65523](https://github.com/ClickHouse/ClickHouse/pull/65523) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix NOT_FOUND_COLUMN_IN_BLOCK for deduplicate merge of projection. [#65573](https://github.com/ClickHouse/ClickHouse/pull/65573) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fixed a bug in MergeJoin: a column in sparse serialization might be treated as a column of its nested type even though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
* Fixed a bug where the compatibility level '23.4' was not properly applied. [#65737](https://github.com/ClickHouse/ClickHouse/pull/65737) ([cw5121](https://github.com/cw5121)).
* Fix ODBC tables with nullable fields. [#65738](https://github.com/ClickHouse/ClickHouse/pull/65738) ([Rodolphe Dugé de Bernonville](https://github.com/RodolpheDuge)).
* Fix a data race in `TCPHandler`, which could happen on a fatal error. [#65744](https://github.com/ClickHouse/ClickHouse/pull/65744) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix invalid exceptions in function `parseDateTime` with the `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
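For reference, a small example of these placeholders (`%F` is the `YYYY-MM-DD` date and `%D` the `MM/DD/YY` date, matching the `formatDateTime` table):
```sql
SELECT
    parseDateTime('2024-07-01', '%F'),
    parseDateTime('07/01/24', '%D');
```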
* For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, the ClickHouse query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix a bug in short-circuit logic when the old analyzer and `dictGetOrDefault` are used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
* Fix a bug that led EmbeddedRocksDB with TTL to write corrupted SST files. [#65816](https://github.com/ClickHouse/ClickHouse/pull/65816) ([Duc Canh Le](https://github.com/canhld94)).
* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out of bounds. [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)).
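For example (a `UInt8` has bits 0 through 7):
```sql
SELECT bitTest(toUInt8(1), 0);  -- 1
SELECT bitTest(toUInt8(1), 8);  -- now throws instead of returning an arbitrary value
```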
* Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)).
* Better handling of join conditions involving `IS NULL` checks (for example, `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL)) OR ((a IS NULL) AND (b IS NULL))` is rewritten to `ON a <=> b`), and fix an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
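A sketch of the pattern that now gets rewritten to a NULL-safe comparison; `t1`, `t2`, and their columns are hypothetical:
```sql
SELECT *
FROM t1
JOIN t2
    ON (t1.a = t2.b AND t1.a IS NOT NULL AND t2.b IS NOT NULL)
    OR (t1.a IS NULL AND t2.b IS NULL);
-- is now treated as: ... JOIN t2 ON t1.a <=> t2.b
```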
* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions. [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
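For instance:
```sql
SELECT bitShiftLeft(toUInt8(1), 7);  -- 128
SELECT bitShiftLeft(toUInt8(1), 9);  -- now throws: the shift exceeds the 8-bit width
```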
* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)).
* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible issues with MySQL client protocol TLS connections. [#65938](https://github.com/ClickHouse/ClickHouse/pull/65938) ([Azat Khuzhin](https://github.com/azat)).
* Fix handling of `SSL_ERROR_WANT_READ`/`SSL_ERROR_WANT_WRITE` with zero timeout. [#65941](https://github.com/ClickHouse/ClickHouse/pull/65941) ([Azat Khuzhin](https://github.com/azat)).
* Add the missing settings `input_format_csv_skip_first_lines`/`input_format_tsv_skip_first_lines`/`input_format_csv_try_infer_numbers_from_strings`/`input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* The `_size` virtual column in the `s3` engine and `s3` table function now denotes the size of a file inside the archive, not the size of the archive itself. [#65993](https://github.com/ClickHouse/ClickHouse/pull/65993) ([Daniil Ivanik](https://github.com/divanik)).
* Fix resolving dynamic subcolumns in the analyzer, and avoid reading the whole column when reading a dynamic subcolumn. [#66004](https://github.com/ClickHouse/ClickHouse/pull/66004) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix config merging for `from_env` with replace overrides. [#66034](https://github.com/ClickHouse/ClickHouse/pull/66034) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible hanging in `GRPCServer` during shutdown. [#66061](https://github.com/ClickHouse/ClickHouse/pull/66061) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed several cases in function `has` with non-constant `LowCardinality` arguments. [#66088](https://github.com/ClickHouse/ClickHouse/pull/66088) ([Anton Popov](https://github.com/CurtizJ)).
* Fix for `groupArrayIntersect`. It had incorrect behavior in the `merge()` function. Also, fixed behavior in `deserialize()` for numeric and general data. [#66103](https://github.com/ClickHouse/ClickHouse/pull/66103) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fixed a buffer overflow bug in the `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if the optimization merges two filter expressions and does not apply a short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed the issue when the server failed to parse Avro files with negative block sizes encoded, which is allowed by the Avro specification. [#66130](https://github.com/ClickHouse/ClickHouse/pull/66130) ([Serge Klochkov](https://github.com/slvrtrn)).
* Fixed a bug in the ZooKeeper client: a session could get stuck in an unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to the "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix an issue in SumIfToCountIfVisitor with signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a rare case with missing data in the result of a distributed query. [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
* Fix the order of parsing metadata fields in StorageDeltaLake. [#66211](https://github.com/ClickHouse/ClickHouse/pull/66211) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Don't throw `TIMEOUT_EXCEEDED` for the `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix handling of the limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix error reporting while copying to S3 or AzureBlobStorage. [#66295](https://github.com/ClickHouse/ClickHouse/pull/66295) ([Vitaly Baranov](https://github.com/vitlibar)).
* Prevent the watchdog from keeping descriptors of unlinked (rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Fix a bug where `LogicalExpressionOptimizerPass` lost the logical type of a constant. [#66344](https://github.com/ClickHouse/ClickHouse/pull/66344) ([pn](https://github.com/chloro-pn)).
* Fix the `Column identifier is already registered` error with `group_by_use_nulls=true` and the new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a possible incorrect result for queries joining and filtering tables with an external engine (like PostgreSQL), due to too aggressive filter pushdown. From now on, conditions from the WHERE section won't be sent to the external database in the case of an outer join with an external table. [#66402](https://github.com/ClickHouse/ClickHouse/pull/66402) ([vdimir](https://github.com/vdimir)).
* Added missing column materialization for cross join. [#66413](https://github.com/ClickHouse/ClickHouse/pull/66413) ([lgbo](https://github.com/lgbo-ustc)).
* Fix the `Cannot find column` error for queries with a constant expression in the `GROUP BY` key and the new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Avoid a possible logical error during import from the Npy format in case of a bad array nesting level; fix testing of other kinds of errors. [#66461](https://github.com/ClickHouse/ClickHouse/pull/66461) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix a wrong `count()` result when there is a non-deterministic function in the predicate. [#66510](https://github.com/ClickHouse/ClickHouse/pull/66510) ([Duc Canh Le](https://github.com/canhld94)).
* Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix reading of uninitialized memory when hashing empty tuples. [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix `column_length` not being updated in `ColumnTuple::insertManyFrom`. [#66626](https://github.com/ClickHouse/ClickHouse/pull/66626) ([lgbo](https://github.com/lgbo-ustc)).
* Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), only with the analyzer disabled. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix the `Method getResultType is not supported for QUERY query node` error when a scalar subquery was used as the first argument of `IN` (with the new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a possible PARAMETER_OUT_OF_BOUND error when reading a variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a rare case of a stuck merge after DROP COLUMN. [#66707](https://github.com/ClickHouse/ClickHouse/pull/66707) ([Raúl Marín](https://github.com/Algunenano)).
* Fix the assertion `isUniqTypes` when running INSERT SELECT from remote sources. [#66722](https://github.com/ClickHouse/ClickHouse/pull/66722) ([Sema Checherinda](https://github.com/CheSema)).
* Fix a logical error in PrometheusRequestHandler. [#66621](https://github.com/ClickHouse/ClickHouse/pull/66621) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix an `indexHint` function case found by the fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Fix AST formatting of `CREATE TABLE b EMPTY AS a`. [#64951](https://github.com/ClickHouse/ClickHouse/pull/64951) ([Michael Kolupaev](https://github.com/al13n321)).

#### Build/Testing/Packaging Improvement
* Instantiate template methods ahead of time in different .cpp files to avoid too-large translation units during compilation. [#64818](https://github.com/ClickHouse/ClickHouse/pull/64818) ([lgbo](https://github.com/lgbo-ustc)).

### <a id="246"></a> ClickHouse release 24.6, 2024-07-01

#### Backward Incompatible Change
* Enable asynchronous load of databases and tables by default. See the `async_load_databases` in config.xml. While this change is fully compatible, it can introduce a difference in behavior. When `async_load_databases` is false, as in the previous versions, the server will not accept connections until all tables are loaded. When `async_load_databases` is true, as in the new version, the server can accept connections before all the tables are loaded. If a query is made to a table that is not yet loaded, it will wait for the table's loading, which can take considerable time. It can change the behavior of the server if it is part of a large distributed system under a load balancer. In the first case, the load balancer can get a connection refusal and quickly failover to another server. In the second case, the load balancer can connect to a server that is still loading the tables, and the query will have a higher latency. Moreover, if many queries accumulate in the waiting state, it can lead to a "thundering herd" problem when they start processing simultaneously. This can make a difference only for highly loaded distributed backends. You can set the value of `async_load_databases` to false to avoid this problem. [#57695](https://github.com/ClickHouse/ClickHouse/pull/57695) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Setting `replace_long_file_name_to_hash` is enabled by default for `MergeTree` tables. [#64457](https://github.com/ClickHouse/ClickHouse/pull/64457) ([Anton Popov](https://github.com/CurtizJ)). This setting is fully compatible, and no actions are needed during the upgrade. The new data format is supported by all versions starting from 23.9. After enabling this setting, you can no longer downgrade to version 23.8 or older.
* Some invalid queries will fail earlier during parsing. Note: disabled the support for inline KQL expressions (the experimental Kusto language) when they are put into a `kql` table function without a string literal, e.g. `kql(garbage | trash)` instead of `kql('garbage | trash')` or `kql($$garbage | trash$$)`. This feature was introduced unintentionally and should not exist. [#61500](https://github.com/ClickHouse/ClickHouse/pull/61500) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Rework parallel processing in `Ordered` mode of storage `S3Queue`. This PR is backward incompatible for Ordered mode if you used the settings `s3queue_processing_threads_num` or `s3queue_total_shards_num`. Setting `s3queue_total_shards_num` is deleted; previously, it was allowed only under `s3queue_allow_experimental_sharded_mode`, which is now deprecated. A new setting, `s3queue_buckets`, is added. [#64349](https://github.com/ClickHouse/ClickHouse/pull/64349) ([Kseniia Sumarokova](https://github.com/kssenii)).
* New functions `snowflakeIDToDateTime`, `snowflakeIDToDateTime64`, `dateTimeToSnowflakeID`, and `dateTime64ToSnowflakeID` were added. Unlike the existing functions `snowflakeToDateTime`, `snowflakeToDateTime64`, `dateTimeToSnowflake`, and `dateTime64ToSnowflake`, the new functions are compatible with function `generateSnowflakeID`, i.e. they accept the snowflake IDs generated by `generateSnowflakeID` and produce snowflake IDs of the same type as `generateSnowflakeID` (i.e. `UInt64`). Furthermore, the new functions default to the UNIX epoch (aka 1970-01-01), just like `generateSnowflakeID`. If necessary, a different epoch, e.g. Twitter's/X's epoch 2010-11-04 (1288834974657 msec since the UNIX epoch), can be passed. The old conversion functions are deprecated and will be removed after a transition period: to use them regardless, enable the setting `allow_deprecated_snowflake_conversion_functions`. [#64948](https://github.com/ClickHouse/ClickHouse/pull/64948) ([Robert Schulze](https://github.com/rschu1ze)).
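For example, a round trip between the new `UInt64` ID type and `DateTime` (output values depend on the current time):
```sql
WITH generateSnowflakeID() AS id
SELECT
    id,                            -- UInt64 snowflake ID
    snowflakeIDToDateTime(id),     -- back to DateTime
    dateTimeToSnowflakeID(now());  -- and from DateTime to an ID
```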

#### New Feature
* Allow to store named collections in ClickHouse Keeper. [#64574](https://github.com/ClickHouse/ClickHouse/pull/64574) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support empty tuples. [#55061](https://github.com/ClickHouse/ClickHouse/pull/55061) ([Amos Bird](https://github.com/amosbird)).
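For instance:
```sql
SELECT tuple() AS t, toTypeName(t);  -- () with type Tuple()
```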
* Add Hilbert Curve encode and decode functions. [#60156](https://github.com/ClickHouse/ClickHouse/pull/60156) ([Artem Mustafin](https://github.com/Artemmm91)).
* Add support for index analysis over `hilbertEncode`. [#64662](https://github.com/ClickHouse/ClickHouse/pull/64662) ([Artem Mustafin](https://github.com/Artemmm91)).
* Added support for reading `LINESTRING` geometry in the WKT format using function `readWKTLineString`. [#62519](https://github.com/ClickHouse/ClickHouse/pull/62519) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Allow to attach parts from a different disk. [#63087](https://github.com/ClickHouse/ClickHouse/pull/63087) ([Unalian](https://github.com/Unalian)).
* Added new SQL functions `generateSnowflakeID` for generating Twitter-style Snowflake IDs. [#63577](https://github.com/ClickHouse/ClickHouse/pull/63577) ([Danila Puzov](https://github.com/kazalika)).
* Added `merge_workload` and `mutation_workload` settings to regulate how resources are utilized and shared between merges, mutations and other workloads. [#64061](https://github.com/ClickHouse/ClickHouse/pull/64061) ([Sergei Trifonov](https://github.com/serxa)).
* Add support for comparing `IPv4` and `IPv6` types using the `=` operator. [#64292](https://github.com/ClickHouse/ClickHouse/pull/64292) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
* Support decimal arguments in binary math functions (pow, atan2, max2, min2, hypot). [#64582](https://github.com/ClickHouse/ClickHouse/pull/64582) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
* Added SQL functions `parseReadableSize` (along with `OrNull` and `OrZero` variants). [#64742](https://github.com/ClickHouse/ClickHouse/pull/64742) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
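For example (the function returns the size in bytes; the `OrZero` variant returns 0 instead of throwing on malformed input):
```sql
SELECT
    parseReadableSize('1.5 MiB'),           -- 1572864
    parseReadableSizeOrZero('not a size');  -- 0
```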
* Add server settings `max_table_num_to_throw` and `max_database_num_to_throw` to limit the number of databases or tables on `CREATE` queries. [#64781](https://github.com/ClickHouse/ClickHouse/pull/64781) ([Xu Jia](https://github.com/XuJia0210)).
* Add a `_time` virtual column to file-like storages (s3/file/hdfs/url/azureBlobStorage). [#64947](https://github.com/ClickHouse/ClickHouse/pull/64947) ([Ilya Golshtein](https://github.com/ilejn)).
* Introduced new functions `base64URLEncode`, `base64URLDecode` and `tryBase64URLDecode`. [#64991](https://github.com/ClickHouse/ClickHouse/pull/64991) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
* Add new function `editDistanceUTF8`, which calculates the [edit distance](https://en.wikipedia.org/wiki/Edit_distance) between two UTF8 strings. [#65269](https://github.com/ClickHouse/ClickHouse/pull/65269) ([LiuNeng](https://github.com/liuneng1994)).
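For instance (unlike `editDistance`, the distance is computed over Unicode code points rather than bytes):
```sql
SELECT editDistanceUTF8('клик', 'клики');  -- 1: one code point inserted
```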
|
||||||
|
* Add `http_response_headers` configuration to support custom response headers in custom HTTP handlers. [#63562](https://github.com/ClickHouse/ClickHouse/pull/63562) ([Grigorii](https://github.com/GSokol)).
|
||||||
|
* Added a new table function `loop` to support returning query results in an infinite loop. [#63452](https://github.com/ClickHouse/ClickHouse/pull/63452) ([Sariel](https://github.com/sarielwxm)). This is useful for testing.
|
||||||
|
* Introduced two additional columns in the `system.query_log`: `used_privileges` and `missing_privileges`. `used_privileges` is populated with the privileges that were checked during query execution, and `missing_privileges` contains required privileges that are missing. [#64597](https://github.com/ClickHouse/ClickHouse/pull/64597) ([Alexey Katsman](https://github.com/alexkats)).
|
||||||
|
* Added a setting `output_format_pretty_display_footer_column_names` which when enabled displays column names at the end of the table for long tables (50 rows by default), with the threshold value for minimum number of rows controlled by `output_format_pretty_display_footer_column_names_min_rows`. [#65144](https://github.com/ClickHouse/ClickHouse/pull/65144) ([Shaun Struwig](https://github.com/Blargian)).
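A quick way to see it, using only the settings named above:
```sql
SET output_format_pretty_display_footer_column_names = 1,
    output_format_pretty_display_footer_column_names_min_rows = 50;
SELECT number FROM system.numbers LIMIT 100 FORMAT PrettyCompact;
-- The column header "number" is repeated below the last row
```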

#### Experimental Feature
* Introduce statistics of type "number of distinct values". [#59357](https://github.com/ClickHouse/ClickHouse/pull/59357) ([Han Fei](https://github.com/hanfei1991)).
* Support statistics with ReplicatedMergeTree. [#64934](https://github.com/ClickHouse/ClickHouse/pull/64934) ([Han Fei](https://github.com/hanfei1991)).
* If "replica group" is configured for a `Replicated` database, automatically create a cluster that includes replicas from all groups. [#64312](https://github.com/ClickHouse/ClickHouse/pull/64312) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add settings `parallel_replicas_custom_key_range_lower` and `parallel_replicas_custom_key_range_upper` to control how parallel replicas with dynamic shards parallelize queries when using a range filter. [#64604](https://github.com/ClickHouse/ClickHouse/pull/64604) ([josh-hildred](https://github.com/josh-hildred)).

#### Performance Improvement
* Add the ability to reshuffle rows during insert to optimize for size without violating the order set by `PRIMARY KEY`. It's controlled by the setting `optimize_row_order` (off by default). [#63578](https://github.com/ClickHouse/ClickHouse/pull/63578) ([Igor Markelov](https://github.com/ElderlyPassionFruit)).
* Add a native parquet reader, which can read parquet binary to ClickHouse Columns directly. It's controlled by the setting `input_format_parquet_use_native_reader` (disabled by default). [#60361](https://github.com/ClickHouse/ClickHouse/pull/60361) ([ZhiHong Zhang](https://github.com/copperybean)).
* Reduce the number of virtual function calls in `ColumnNullable::size`. [#60556](https://github.com/ClickHouse/ClickHouse/pull/60556) ([HappenLee](https://github.com/HappenLee)).
* Speedup `splitByRegexp` when the regular expression argument is a single character. [#62696](https://github.com/ClickHouse/ClickHouse/pull/62696) ([Robert Schulze](https://github.com/rschu1ze)).
* Speed up aggregation by 8-bit and 16-bit keys by keeping track of the min and max keys used. This allows to reduce the number of cells that need to be verified. [#62746](https://github.com/ClickHouse/ClickHouse/pull/62746) ([Jiebin Sun](https://github.com/jiebinn)).
* Optimize the operator `IN` when the left-hand side is `LowCardinality` and the right is a set of constants. [#64060](https://github.com/ClickHouse/ClickHouse/pull/64060) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Use a thread pool to initialize and destroy hash tables inside `ConcurrentHashJoin`. [#64241](https://github.com/ClickHouse/ClickHouse/pull/64241) ([Nikita Taranov](https://github.com/nickitat)).
* Optimized vertical merges in tables with sparse columns. [#64311](https://github.com/ClickHouse/ClickHouse/pull/64311) ([Anton Popov](https://github.com/CurtizJ)).
* Enabled prefetches of data from remote filesystem during vertical merges. It improves the latency of vertical merges in tables with data stored on a remote filesystem. [#64314](https://github.com/ClickHouse/ClickHouse/pull/64314) ([Anton Popov](https://github.com/CurtizJ)).
* Reduce redundant calls to `isDefault` in `ColumnSparse::filter` to improve performance. [#64426](https://github.com/ClickHouse/ClickHouse/pull/64426) ([Jiebin Sun](https://github.com/jiebinn)).
* Speedup `find_super_nodes` and `find_big_family` keeper-client commands by making multiple asynchronous getChildren requests. [#64628](https://github.com/ClickHouse/ClickHouse/pull/64628) ([Alexander Gololobov](https://github.com/davenger)).
* Improve functions `least`/`greatest` for nullable numeric type arguments. [#64668](https://github.com/ClickHouse/ClickHouse/pull/64668) ([KevinyhZou](https://github.com/KevinyhZou)).
* Allow merging two consequent filtering steps of a query plan. This improves filter-push-down optimization if the filter condition can be pushed down from the parent step. [#64760](https://github.com/ClickHouse/ClickHouse/pull/64760) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove a bad optimization in the vertical final implementation and re-enable the vertical final algorithm by default. [#64783](https://github.com/ClickHouse/ClickHouse/pull/64783) ([Duc Canh Le](https://github.com/canhld94)).
* Remove ALIAS nodes from the filter expression. This slightly improves performance for queries with `PREWHERE` (with the new analyzer). [#64793](https://github.com/ClickHouse/ClickHouse/pull/64793) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a performance regression in cross join introduced in [#60459](https://github.com/ClickHouse/ClickHouse/issues/60459) (24.5). [#65243](https://github.com/ClickHouse/ClickHouse/pull/65243) ([Nikita Taranov](https://github.com/nickitat)).
* Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)).
* Added settings to disable materialization of skip indexes and statistics on inserts (`materialize_skip_indexes_on_insert` and `materialize_statistics_on_insert`). [#64391](https://github.com/ClickHouse/ClickHouse/pull/64391) ([Anton Popov](https://github.com/CurtizJ)).
* Use the allocated memory size to calculate the row group size and reduce the peak memory of the parquet writer in single-threaded mode. [#64424](https://github.com/ClickHouse/ClickHouse/pull/64424) ([LiuNeng](https://github.com/liuneng1994)).
* Improve the iterator of sparse column to reduce calls of `size`. [#64497](https://github.com/ClickHouse/ClickHouse/pull/64497) ([Jiebin Sun](https://github.com/jiebinn)).
* Update the condition to use server-side copy for backups to Azure blob storage. [#64518](https://github.com/ClickHouse/ClickHouse/pull/64518) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Optimized memory usage of vertical merges for tables with a high number of skip indexes. [#64580](https://github.com/ClickHouse/ClickHouse/pull/64580) ([Anton Popov](https://github.com/CurtizJ)).

#### Improvement
* `SHOW CREATE TABLE` executed on top of system tables will now show the super handy comment unique for each table which will explain why this table is needed. [#63788](https://github.com/ClickHouse/ClickHouse/pull/63788) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* The second argument (scale) of functions `round()`, `roundBankers()`, `floor()`, `ceil()` and `trunc()` can now be non-const. [#64798](https://github.com/ClickHouse/ClickHouse/pull/64798) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
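For example, the scale can now come from a column:
```sql
-- Round each value to a per-row number of decimal places
SELECT round(number / 7, number % 3) FROM system.numbers LIMIT 5;
```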
|
||||||
* Add metrics to track the number of directories created and removed by the plain_rewritable metadata storage, and the number of entries in the local-to-remote in-memory map. [#64175](https://github.com/ClickHouse/ClickHouse/pull/64175) ([Julia Kartseva](https://github.com/jkartseva)).
|
* Hot reload storage policy for `Distributed` tables when adding a new disk. [#58285](https://github.com/ClickHouse/ClickHouse/pull/58285) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Avoid possible deadlock during MergeTree index analysis when scheduling threads in a saturated service. [#59427](https://github.com/ClickHouse/ClickHouse/pull/59427) ([Sean Haynes](https://github.com/seandhaynes)).
|
||||||
|
* Several minor corner case fixes to S3 proxy support & tunneling. [#63427](https://github.com/ClickHouse/ClickHouse/pull/63427) ([Arthur Passos](https://github.com/arthurpassos)).
|
||||||
|
* Improve io_uring resubmit visibility. Rename profile event `IOUringSQEsResubmits` -> `IOUringSQEsResubmitsAsync` and add a new one `IOUringSQEsResubmitsSync`. [#63699](https://github.com/ClickHouse/ClickHouse/pull/63699) ([Tomer Shafir](https://github.com/tomershafir)).
|
||||||
|
* Added a new setting, `metadata_keep_free_space_bytes` to keep free space on the metadata storage disk. [#64128](https://github.com/ClickHouse/ClickHouse/pull/64128) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
|
||||||
|
* Add metrics to track the number of directories created and removed by the `plain_rewritable` metadata storage, and the number of entries in the local-to-remote in-memory map. [#64175](https://github.com/ClickHouse/ClickHouse/pull/64175) ([Julia Kartseva](https://github.com/jkartseva)).
* The query cache now considers identical queries with different settings as different. This increases robustness in cases where different settings (e.g. `limit` or `additional_table_filters`) would affect the query result. [#64205](https://github.com/ClickHouse/ClickHouse/pull/64205) ([Robert Schulze](https://github.com/rschu1ze)).
* Better exception message when deleting a table with a projection, so users can understand the error and the steps to take. [#64212](https://github.com/ClickHouse/ClickHouse/pull/64212) ([jsc0218](https://github.com/jsc0218)).
* Support the non-standard error code `QpsLimitExceeded` in object storage as a retryable error. [#64225](https://github.com/ClickHouse/ClickHouse/pull/64225) ([Sema Checherinda](https://github.com/CheSema)).
* Forbid converting a MergeTree table to replicated if the zookeeper path for this table already exists. [#64244](https://github.com/ClickHouse/ClickHouse/pull/64244) ([Kirill](https://github.com/kirillgarbar)).
* If "replica group" is configured for a `Replicated` database, automatically create a cluster that includes replicas from all groups. [#64312](https://github.com/ClickHouse/ClickHouse/pull/64312) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Added a new setting `input_format_parquet_prefer_block_bytes` to control the average output block bytes, and modified the default value of `input_format_parquet_max_block_size` to 65409. [#64427](https://github.com/ClickHouse/ClickHouse/pull/64427) ([LiuNeng](https://github.com/liuneng1994)).
* Added settings to disable materialization of skip indexes and statistics on inserts (`materialize_skip_indexes_on_insert` and `materialize_statistics_on_insert`). [#64391](https://github.com/ClickHouse/ClickHouse/pull/64391) ([Anton Popov](https://github.com/CurtizJ)).
* Allow proxy to be bypassed for hosts specified in `no_proxy` env variable and ClickHouse proxy configuration. [#63314](https://github.com/ClickHouse/ClickHouse/pull/63314) ([Arthur Passos](https://github.com/arthurpassos)).
* Use the allocated memory size to calculate the row group size and reduce the peak memory of the parquet writer in single-threaded mode. [#64424](https://github.com/ClickHouse/ClickHouse/pull/64424) ([LiuNeng](https://github.com/liuneng1994)).
* Always start Keeper with a sufficient number of threads in the global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
* Settings from the user's config don't affect merges and mutations for `MergeTree` on top of object storage. [#64456](https://github.com/ClickHouse/ClickHouse/pull/64456) ([alesapin](https://github.com/alesapin)).
* Setting `replace_long_file_name_to_hash` is enabled by default for `MergeTree` tables. [#64457](https://github.com/ClickHouse/ClickHouse/pull/64457) ([Anton Popov](https://github.com/CurtizJ)).
* Support the non-standard error code `TotalQpsLimitExceeded` in object storage as a retryable error. [#64520](https://github.com/ClickHouse/ClickHouse/pull/64520) ([Sema Checherinda](https://github.com/CheSema)).
* Introduced two additional columns in the `system.query_log`: `used_privileges` and `missing_privileges`. `used_privileges` is populated with the privileges that were checked during query execution, and `missing_privileges` contains required privileges that are missing. [#64597](https://github.com/ClickHouse/ClickHouse/pull/64597) ([Alexey Katsman](https://github.com/alexkats)).
* Add settings `parallel_replicas_custom_key_range_lower` and `parallel_replicas_custom_key_range_upper` to control how parallel replicas with dynamic shards parallelizes queries when using a range filter. [#64604](https://github.com/ClickHouse/ClickHouse/pull/64604) ([josh-hildred](https://github.com/josh-hildred)).
* Updated Advanced Dashboard for both open-source and ClickHouse Cloud versions to include a chart for 'Maximum concurrent network connections'. [#64610](https://github.com/ClickHouse/ClickHouse/pull/64610) ([Thom O'Connor](https://github.com/thomoco)).
* Improve progress report on `zeros_mt` and `generateRandom`. [#64804](https://github.com/ClickHouse/ClickHouse/pull/64804) ([Raúl Marín](https://github.com/Algunenano)).
* Add an asynchronous metric `jemalloc.profile.active` to show whether sampling is currently active. This is an activation mechanism in addition to prof.active; both must be active for the calling thread to sample. [#64842](https://github.com/ClickHouse/ClickHouse/pull/64842) ([Unalian](https://github.com/Unalian)).
* Support statistics with ReplicatedMergeTree. [#64934](https://github.com/ClickHouse/ClickHouse/pull/64934) ([Han Fei](https://github.com/hanfei1991)).
* Remove mark of `allow_experimental_join_condition` as important. This mark may have prevented distributed queries in a mixed versions cluster from being executed successfully. [#65008](https://github.com/ClickHouse/ClickHouse/pull/65008) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Added server asynchronous metrics `DiskGetObjectThrottler*` and `DiskPutObjectThrottler*` reflecting the request-per-second rate limit defined with the `s3_max_get_rps` and `s3_max_put_rps` disk settings, and the number of requests that can currently be sent without hitting the throttling limit on the disk. Metrics are defined for every disk that has a configured limit. [#65050](https://github.com/ClickHouse/ClickHouse/pull/65050) ([Sergei Trifonov](https://github.com/serxa)).
* Added a setting `output_format_pretty_display_footer_column_names` which when enabled displays column names at the end of the table for long tables (50 rows by default), with the threshold value for minimum number of rows controlled by `output_format_pretty_display_footer_column_names_min_rows`. [#65144](https://github.com/ClickHouse/ClickHouse/pull/65144) ([Shaun Struwig](https://github.com/Blargian)).
* Initialize global trace collector for `Poco::ThreadPool` (needed for Keeper, etc). [#65239](https://github.com/ClickHouse/ClickHouse/pull/65239) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Returned back the behaviour of how ClickHouse works and interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add a validation when creating a user with `bcrypt_hash`. [#65242](https://github.com/ClickHouse/ClickHouse/pull/65242) ([Raúl Marín](https://github.com/Algunenano)).
* Add profile events for number of rows read during/after `PREWHERE`. [#64198](https://github.com/ClickHouse/ClickHouse/pull/64198) ([Nikita Taranov](https://github.com/nickitat)).
* Print query in `EXPLAIN PLAN` with parallel replicas. [#64298](https://github.com/ClickHouse/ClickHouse/pull/64298) ([vdimir](https://github.com/vdimir)).
* Unite s3/hdfs/azure storage implementations into a single class working with IObjectStorage. Same for *Cluster, data lakes and Queue storages. [#59767](https://github.com/ClickHouse/ClickHouse/pull/59767) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Refactor data part writer to remove dependencies on MergeTreeData and DataPart. [#63620](https://github.com/ClickHouse/ClickHouse/pull/63620) ([Alexander Gololobov](https://github.com/davenger)).
* Rename `allow_deprecated_functions` to `allow_deprecated_error_prone_window_functions`. [#64358](https://github.com/ClickHouse/ClickHouse/pull/64358) ([Raúl Marín](https://github.com/Algunenano)).
* Respect `max_read_buffer_size` setting for file descriptors as well in the `file` table function. [#64532](https://github.com/ClickHouse/ClickHouse/pull/64532) ([Azat Khuzhin](https://github.com/azat)).
* Disable transactions for unsupported storages even for materialized views. [#64918](https://github.com/ClickHouse/ClickHouse/pull/64918) ([alesapin](https://github.com/alesapin)).
* Refactor `KeyCondition` and key analysis to improve PartitionPruner and trivial count optimization. This is separated from [#60463](https://github.com/ClickHouse/ClickHouse/issues/60463) . [#61459](https://github.com/ClickHouse/ClickHouse/pull/61459) ([Amos Bird](https://github.com/amosbird)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* A bug in Apache ORC library was fixed: Fixed ORC statistics calculation, when writing, for unsigned types on all platforms and Int8 on ARM. [#64563](https://github.com/ClickHouse/ClickHouse/pull/64563) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix a permission error where a user in a specific situation can escalate their privileges on the default database without necessary grants. [#64769](https://github.com/ClickHouse/ClickHouse/pull/64769) ([pufit](https://github.com/pufit)).
* Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a bug in ClickHouse Keeper that caused a digest mismatch during session closing. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Forbid `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)).
* Use correct memory alignment for the Distinct combinator. Previously, a crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix crash with `DISTINCT` and window functions. [#64767](https://github.com/ClickHouse/ClickHouse/pull/64767) ([Igor Nikonov](https://github.com/devcrafter)).
* Fixed 'set' skip index not working with IN and indexHint(). [#62083](https://github.com/ClickHouse/ClickHouse/pull/62083) ([Michael Kolupaev](https://github.com/al13n321)).
@@ -132,7 +303,6 @@
* Fixed `optimize_read_in_order` behaviour for ORDER BY ... NULLS FIRST / LAST on tables with nullable keys. [#64483](https://github.com/ClickHouse/ClickHouse/pull/64483) ([Eduard Karacharov](https://github.com/korowa)).
* Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix an error `Cannot find column` in distributed queries with constant CTE in the `GROUP BY` key. [#64519](https://github.com/ClickHouse/ClickHouse/pull/64519) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
* Fix the output of function `formatDateTimeInJodaSyntax` when a formatter generates an uneven number of characters and the last character is `0`. For example, `SELECT formatDateTimeInJodaSyntax(toDate('2012-05-29'), 'D')` now correctly returns `150` instead of previously `15`. [#64614](https://github.com/ClickHouse/ClickHouse/pull/64614) ([LiuNeng](https://github.com/liuneng1994)).
* Do not rewrite aggregation if `-If` combinator is already used. [#64638](https://github.com/ClickHouse/ClickHouse/pull/64638) ([Dmitry Novik](https://github.com/novikd)).
@@ -166,21 +336,14 @@
* Ensure that the type of the constant (the second argument of the `IN` operator) is always visible during the `IN` operator's type conversion. Otherwise, losing type information may cause some conversions to fail, such as the conversion from DateTime to Date. This fixes [#64487](https://github.com/ClickHouse/ClickHouse/issues/64487). [#65315](https://github.com/ClickHouse/ClickHouse/pull/65315) ([pn](https://github.com/chloro-pn)).

#### Build/Testing/Packaging Improvement
* Make `network` service be required when using the `rc` init script to start the ClickHouse server daemon. [#60650](https://github.com/ClickHouse/ClickHouse/pull/60650) ([Chun-Sheng, Li](https://github.com/peter279k)).
* Add support for LLVM XRay. [#64592](https://github.com/ClickHouse/ClickHouse/pull/64592) [#64837](https://github.com/ClickHouse/ClickHouse/pull/64837) ([Tomer Shafir](https://github.com/tomershafir)).
* Fix typo in test_hdfsCluster_unset_skip_unavailable_shards. The test writes data to unskip_unavailable_shards, but uses skip_unavailable_shards from the previous test. [#64243](https://github.com/ClickHouse/ClickHouse/pull/64243) ([Mikhail Artemenko](https://github.com/Michicosun)).
* Reduce the size of some slow tests. [#64387](https://github.com/ClickHouse/ClickHouse/pull/64387) [#64452](https://github.com/ClickHouse/ClickHouse/pull/64452) ([Raúl Marín](https://github.com/Algunenano)).
* Fix test_lost_part_other_replica. [#64512](https://github.com/ClickHouse/ClickHouse/pull/64512) ([Raúl Marín](https://github.com/Algunenano)).
* Introduce assertions to verify all functions are called with columns of the right size. [#63723](https://github.com/ClickHouse/ClickHouse/pull/63723) ([Raúl Marín](https://github.com/Algunenano)).
* Add tests for experimental unequal joins and randomize new settings in clickhouse-test. [#64535](https://github.com/ClickHouse/ClickHouse/pull/64535) ([Nikita Fomichev](https://github.com/fm4v)).
* Upgrade tests: Update config and work with release candidates. [#64542](https://github.com/ClickHouse/ClickHouse/pull/64542) ([Raúl Marín](https://github.com/Algunenano)).
* Speed up 02995_forget_partition. [#64761](https://github.com/ClickHouse/ClickHouse/pull/64761) ([Raúl Marín](https://github.com/Algunenano)).
* Fix 02790_async_queries_in_query_log. [#64764](https://github.com/ClickHouse/ClickHouse/pull/64764) ([Raúl Marín](https://github.com/Algunenano)).
* Get rid of custom code in `tests/ci/download_release_packages.py` and `tests/ci/get_previous_release_tag.py` to avoid issues after https://github.com/ClickHouse/ClickHouse/pull/64759 is merged. [#64848](https://github.com/ClickHouse/ClickHouse/pull/64848) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Decrease the `unit-test` image a few times. [#65102](https://github.com/ClickHouse/ClickHouse/pull/65102) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Replay ZooKeeper logs using keeper-bench. [#62481](https://github.com/ClickHouse/ClickHouse/pull/62481) ([Antonio Andelic](https://github.com/antonio2368)).
* Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)).

### <a id="245"></a> ClickHouse release 24.5, 2024-05-30
|
### <a id="245"></a> ClickHouse release 24.5, 2024-05-30
|
||||||
|
|
||||||
|
@@ -319,7 +319,6 @@ endif()
# Disable floating-point expression contraction in order to get consistent floating point calculation results across platforms
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffp-contract=off")

set (DEBUG_INFO_FLAGS "-g")

# Disable omit frame pointer compiler optimization using -fno-omit-frame-pointer
@@ -333,15 +332,15 @@ endif()

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
||||||
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
||||||
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
||||||
|
|
||||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
||||||
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
||||||
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
||||||
|
|
||||||
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
||||||
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
||||||
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
||||||
|
|
||||||
if (OS_DARWIN)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
@@ -34,14 +34,12 @@ curl https://clickhouse.com/ | sh

Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.

* [v24.7 Community Call](https://clickhouse.com/company/events/v24-7-community-release-call) - Jul 30

## Upcoming Events

Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.

* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
* [ClickHouse Cloud - Live Update Call](https://clickhouse.com/company/events/202407-cloud-update-live) - Jul 9
* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9

@@ -14,6 +14,7 @@ The following versions of ClickHouse server are currently supported with security updates:

| Version | Supported |
|:-|:-|
| 24.6 | ✔️ |
| 24.5 | ✔️ |
| 24.4 | ✔️ |
| 24.3 | ✔️ |

@@ -3,8 +3,9 @@
#include <base/defines.h>

#include <fstream>
#include <string>

namespace fs = std::filesystem;

bool cgroupsV2Enabled()
{
@@ -13,11 +14,11 @@ bool cgroupsV2Enabled()
    {
        /// This file exists iff the host has cgroups v2 enabled.
        auto controllers_file = default_cgroups_mount / "cgroup.controllers";
        if (!fs::exists(controllers_file))
            return false;
        return true;
    }
    catch (const fs::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
    {
        return false; /// not logging the exception as most callers fall back to cgroups v1
    }
@@ -33,8 +34,9 @@ bool cgroupsV2MemoryControllerEnabled()
    /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
    /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
    /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
    fs::path cgroup_dir = cgroupV2PathOfProcess();
    if (cgroup_dir.empty())
        return false;
    std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
    if (!controllers_file.is_open())
        return false;
@@ -46,7 +48,7 @@ bool cgroupsV2MemoryControllerEnabled()
#endif
}

fs::path cgroupV2PathOfProcess()
{
#if defined(OS_LINUX)
    chassert(cgroupsV2Enabled());
@@ -54,17 +56,18 @@ std::string cgroupV2OfProcess()
    /// A simpler way to get the membership is:
    std::ifstream cgroup_name_file("/proc/self/cgroup");
    if (!cgroup_name_file.is_open())
        return {};
    /// With cgroups v2, there will be a *single* line with prefix "0::/"
    /// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
    std::string cgroup;
    std::getline(cgroup_name_file, cgroup);
    static const std::string v2_prefix = "0::/";
    if (!cgroup.starts_with(v2_prefix))
        return {};
    cgroup = cgroup.substr(v2_prefix.length());
    /// Note: The 'root' cgroup can have an empty cgroup name, this is valid
    return default_cgroups_mount / cgroup;
#else
    return {};
#endif
}
@@ -1,7 +1,6 @@
#pragma once

#include <filesystem>

#if defined(OS_LINUX)
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@@ -16,7 +15,7 @@ bool cgroupsV2Enabled();
/// Assumes that cgroupsV2Enabled() is enabled.
bool cgroupsV2MemoryControllerEnabled();

/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
/// Returns an empty path if the cgroup cannot be determined.
/// Assumes that cgroupsV2Enabled() is enabled.
std::filesystem::path cgroupV2PathOfProcess();
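For orientation, here is a minimal sketch of a caller of this API (my code, not part of the commit; it assumes the header above, an assumed include path, and a Linux host with cgroups v2; `memory.current` is the standard cgroup v2 usage file):

#include <base/cgroupsV2.h> /// assumed include path for the header above

#include <fstream>
#include <iostream>
#include <string>

int main()
{
    if (!cgroupsV2Enabled())
        return 1;

    std::filesystem::path cgroup_dir = cgroupV2PathOfProcess();
    if (cgroup_dir.empty())
        return 1; /// membership could not be determined

    /// cgroup v2 exposes the current memory usage as a plain-text file.
    std::ifstream usage_file(cgroup_dir / "memory.current");
    std::string usage;
    if (usage_file.is_open() && std::getline(usage_file, usage))
        std::cout << "memory.current: " << usage << " bytes\n";
    return 0;
}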
@@ -87,10 +87,13 @@
# define ASAN_POISON_MEMORY_REGION(a, b)
#endif

/// We used to have only ABORT_ON_LOGICAL_ERROR macro, but most of its uses were actually in places where we didn't care about logical errors
/// but wanted to check exactly if the current build type is debug or with sanitizer. This new macro is introduced to fix those places.
#if !defined(DEBUG_OR_SANITIZER_BUILD)
# if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) \
    || defined(UNDEFINED_BEHAVIOR_SANITIZER)
# define DEBUG_OR_SANITIZER_BUILD
# endif
#endif

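To illustrate the renamed macro, here is a hypothetical call site (my sketch, not code from this commit; it assumes base/defines.h is included):

#include <base/defines.h>

int checkedDivide(int a, int b)
{
    /// Aborts in debug/sanitizer builds; effectively a no-op in release builds.
    chassert(b != 0);

#if defined(DEBUG_OR_SANITIZER_BUILD)
    /// Expensive extra validation intended only for debug/sanitizer builds goes here.
    /// Previously such blocks were guarded by ABORT_ON_LOGICAL_ERROR, which conflated two meanings.
#endif

    return a / b;
}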
/// chassert(x) is similar to assert(x), but:
@@ -101,7 +104,7 @@
/// Also it makes sense to call abort() instead of __builtin_unreachable() in debug builds,
/// because SIGABRT is easier to debug than SIGTRAP (the second one makes gdb crazy)
#if !defined(chassert)
# if defined(DEBUG_OR_SANITIZER_BUILD)
// clang-format off
#include <base/types.h>
namespace DB
@@ -1,5 +1,6 @@
#pragma once

#include <cstdlib>
#include <memory>
#include <string>

@@ -108,6 +108,14 @@ struct make_unsigned // NOLINT(readability-identifier-naming)
    using type = std::make_unsigned_t<T>;
};

template <> struct make_unsigned<Int8> { using type = UInt8; };
template <> struct make_unsigned<UInt8> { using type = UInt8; };
template <> struct make_unsigned<Int16> { using type = UInt16; };
template <> struct make_unsigned<UInt16> { using type = UInt16; };
template <> struct make_unsigned<Int32> { using type = UInt32; };
template <> struct make_unsigned<UInt32> { using type = UInt32; };
template <> struct make_unsigned<Int64> { using type = UInt64; };
template <> struct make_unsigned<UInt64> { using type = UInt64; };
template <> struct make_unsigned<Int128> { using type = UInt128; };
template <> struct make_unsigned<UInt128> { using type = UInt128; };
template <> struct make_unsigned<Int256> { using type = UInt256; };
@@ -121,6 +129,14 @@ struct make_signed // NOLINT(readability-identifier-naming)
    using type = std::make_signed_t<T>;
};

template <> struct make_signed<Int8> { using type = Int8; };
template <> struct make_signed<UInt8> { using type = Int8; };
template <> struct make_signed<Int16> { using type = Int16; };
template <> struct make_signed<UInt16> { using type = Int16; };
template <> struct make_signed<Int32> { using type = Int32; };
template <> struct make_signed<UInt32> { using type = Int32; };
template <> struct make_signed<Int64> { using type = Int64; };
template <> struct make_signed<UInt64> { using type = Int64; };
template <> struct make_signed<Int128> { using type = Int128; };
template <> struct make_signed<UInt128> { using type = Int128; };
template <> struct make_signed<Int256> { using type = Int256; };
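A few compile-time checks illustrating what the specializations above guarantee (my sketch; it assumes base/extended_types.h and <type_traits> are available, and uses only the mappings shown above):

#include <type_traits>
#include <base/extended_types.h>

/// The extended integer types are not standard integral types, so the mapping
/// cannot fall through to std::make_unsigned_t / std::make_signed_t for them;
/// the 8..64-bit aliases are now covered by explicit specializations as well.
static_assert(std::is_same_v<typename make_unsigned<Int8>::type, UInt8>);
static_assert(std::is_same_v<typename make_unsigned<Int128>::type, UInt128>);
static_assert(std::is_same_v<typename make_signed<UInt64>::type, Int64>);
static_assert(std::is_same_v<typename make_signed<UInt128>::type, Int128>);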
@@ -6,6 +6,9 @@ namespace
{
std::string getFQDNOrHostNameImpl()
{
#if defined(OS_DARWIN)
    return Poco::Net::DNS::hostName();
#else
    try
    {
        return Poco::Net::DNS::thisHost().name();
@@ -14,6 +17,7 @@ namespace
    {
        return Poco::Net::DNS::hostName();
    }
#endif
}
}

@@ -23,8 +23,9 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
    if (!cgroupsV2MemoryControllerEnabled())
        return {};

    std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
    if (current_cgroup.empty())
        return {};

    /// Open the bottom-most nested memory limit setting file. If there is no such file at the current
    /// level, try again at the parent level as memory settings are inherited.
base/base/isSharedPtrUnique.h (new file, 9 lines)
@@ -0,0 +1,9 @@
#pragma once

#include <memory>

template <typename T>
bool isSharedPtrUnique(const std::shared_ptr<T> & ptr)
{
    return ptr.use_count() == 1;
}
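A usage sketch (mine, not part of the commit): this helper stands in for std::shared_ptr::unique(), which was deprecated in C++17 and removed in C++20. Note that use_count() is only a snapshot when other threads may copy the pointer concurrently. The include path below is an assumption:

#include <base/isSharedPtrUnique.h> /// assumed include path for the new header
#include <cassert>
#include <memory>

int main()
{
    auto p = std::make_shared<int>(42);
    assert(isSharedPtrUnique(p));   /// single owner

    auto q = p;                     /// second owner appears
    assert(!isSharedPtrUnique(p));

    q.reset();                      /// back to a single owner
    assert(isSharedPtrUnique(p));
    return 0;
}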
@@ -1,32 +1,3 @@
#include <type_traits>
#include <base/defines.h>
#include <base/extended_types.h>
@@ -34,99 +5,15 @@

namespace
{
ALWAYS_INLINE inline char * outOneDigit(char * p, uint8_t value)
{
    *p = '0' + value;
    return p + 1;
}

// Using a lookup table to convert binary numbers from 0 to 99
// into ascii characters as described by Andrei Alexandrescu in
// https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920/

const char digits[201] = "00010203040506070809"
                         "10111213141516171819"
                         "20212223242526272829"
@@ -137,7 +24,6 @@ const char digits[201] = "00010203040506070809"
                         "70717273747576777879"
                         "80818283848586878889"
                         "90919293949596979899";

ALWAYS_INLINE inline char * outTwoDigits(char * p, uint8_t value)
{
    memcpy(p, &digits[value * 2], 2);
@@ -145,153 +31,260 @@ ALWAYS_INLINE inline char * outTwoDigits(char * p, uint8_t value)
    return p;
}
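To see why the table pays off, here is a self-contained sketch (my code, not the source's) that rebuilds the same 00..99 layout and shows that emitting two digits costs one 2-byte copy instead of a divide per digit:

#include <cassert>
#include <cstring>

int main()
{
    /// Rebuild the 201-byte "000102...99" table programmatically.
    char table[201];
    for (int i = 0; i < 100; ++i)
    {
        table[i * 2] = char('0' + i / 10);
        table[i * 2 + 1] = char('0' + i % 10);
    }
    table[200] = '\0';

    /// Value 42 lives at offset 84: a single memcpy emits both characters.
    char buf[3] = {};
    memcpy(buf, &table[42 * 2], 2);
    assert(strcmp(buf, "42") == 0);
    return 0;
}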
namespace convert
|
namespace jeaiii
|
||||||
{
|
{
|
||||||
template <typename UInt, size_t N = sizeof(UInt)>
|
/*
|
||||||
char * head(char * p, UInt u);
|
MIT License
|
||||||
template <typename UInt, size_t N = sizeof(UInt)>
|
|
||||||
char * tail(char * p, UInt u);
|
|
||||||
|
|
||||||
//===----------------------------------------------------------===//
|
Copyright (c) 2022 James Edward Anhalt III - https://github.com/jeaiii/itoa
|
||||||
// head: find most significant digit, skip leading zeros
|
|
||||||
//===----------------------------------------------------------===//
|
|
||||||
|
|
||||||
// "x" contains quotient and remainder after division by 10^N
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
// quotient is less than 10^N
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
template <size_t N>
|
in the Software without restriction, including without limitation the rights
|
||||||
ALWAYS_INLINE inline char * head(char * p, QuotientAndRemainder<N> x)
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
-{
-    p = head(p, UnsignedOfSize<N / 2>(x.quotient));
-    p = tail(p, x.remainder);
-    return p;
-}
-
-// "u" is less than 10^2*N
-template <typename UInt, size_t N>
-ALWAYS_INLINE inline char * head(char * p, UInt u)
-{
-    return u < pow10<UnsignedOfSize<N>>(N) ? head(p, UnsignedOfSize<N / 2>(u)) : head<N>(p, split<N>(u));
-}
-
-// recursion base case, selected when "u" is one byte
-template <>
-ALWAYS_INLINE inline char * head<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
-{
-    return u < 10 ? outDigit(p, u) : outTwoDigits(p, u);
-}
-
-//===----------------------------------------------------------===//
-// tail: produce all digits including leading zeros
-//===----------------------------------------------------------===//
-
-// recursive step, "u" is less than 10^2*N
-template <typename UInt, size_t N>
-ALWAYS_INLINE inline char * tail(char * p, UInt u)
-{
-    QuotientAndRemainder<N> x = split<N>(u);
-    p = tail(p, UnsignedOfSize<N / 2>(x.quotient));
-    p = tail(p, x.remainder);
-    return p;
-}
-
-// recursion base case, selected when "u" is one byte
-template <>
-ALWAYS_INLINE inline char * tail<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
-{
-    return outTwoDigits(p, u);
-}
-
-//===----------------------------------------------------------===//
-// large values are >= 10^2*N
-// where x contains quotient and remainder after division by 10^N
-//===----------------------------------------------------------===//
-template <size_t N>
-ALWAYS_INLINE inline char * large(char * p, QuotientAndRemainder<N> x)
-{
-    QuotientAndRemainder<N> y = split<N>(x.quotient);
-    p = head(p, UnsignedOfSize<N / 2>(y.quotient));
-    p = tail(p, y.remainder);
-    p = tail(p, x.remainder);
-    return p;
-}
-
-//===----------------------------------------------------------===//
-// handle values of "u" that might be >= 10^2*N
-// where N is the size of "u" in bytes
-//===----------------------------------------------------------===//
-template <typename UInt, size_t N = sizeof(UInt)>
-ALWAYS_INLINE inline char * uitoa(char * p, UInt u)
-{
-    if (u < pow10<UnsignedOfSize<N>>(N))
-        return head(p, UnsignedOfSize<N / 2>(u));
-    QuotientAndRemainder<N> x = split<N>(u);
-
-    return u < pow10<UnsignedOfSize<N>>(2 * N) ? head<N>(p, x) : large<N>(p, x);
-}
-
-// selected when "u" is one byte
-template <>
-ALWAYS_INLINE inline char * uitoa<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
-{
-    if (u < 10)
-        return outDigit(p, u);
-    else if (u < 100)
-        return outTwoDigits(p, u);
-    else
-    {
-        p = outDigit(p, u / 100);
-        p = outTwoDigits(p, u % 100);
-        return p;
-    }
-}
-
-//===----------------------------------------------------------===//
-// handle unsigned and signed integral operands
-//===----------------------------------------------------------===//
-
-// itoa: handle unsigned integral operands (selected by SFINAE)
-template <typename U>
-requires(!std::is_signed_v<U> && std::is_integral_v<U>)
-ALWAYS_INLINE inline char * itoa(U u, char * p)
-{
-    return convert::uitoa(p, u);
-}
-
-// itoa: handle signed integral operands (selected by SFINAE)
-template <typename I, size_t N = sizeof(I)>
-requires(std::is_signed_v<I> && std::is_integral_v<I>)
-ALWAYS_INLINE inline char * itoa(I i, char * p)
-{
-    // Need "mask" to be filled with a copy of the sign bit.
-    // If "i" is a negative value, then the result of "operator >>"
-    // is implementation-defined, though usually it is an arithmetic
-    // right shift that replicates the sign bit.
-    // Use a conditional expression to be portable,
-    // a good optimizing compiler generates an arithmetic right shift
-    // and avoids the conditional branch.
-    UnsignedOfSize<N> mask = i < 0 ? ~UnsignedOfSize<N>(0) : 0;
-    // Now get the absolute value of "i" and cast to unsigned type UnsignedOfSize<N>.
-    // Cannot use std::abs() because the result is undefined
-    // in 2's complement systems for the most-negative value.
-    // Want to avoid conditional branch for performance reasons since
-    // CPU branch prediction will be ineffective when negative values
-    // occur randomly.
-    // Let "u" be "i" cast to unsigned type UnsignedOfSize<N>.
-    // Subtract "u" from 2*u if "i" is positive or 0 if "i" is negative.
-    // This yields the absolute value with the desired type without
-    // using a conditional branch and without invoking undefined or
-    // implementation defined behavior:
-    UnsignedOfSize<N> u = ((2 * UnsignedOfSize<N>(i)) & ~mask) - UnsignedOfSize<N>(i);
-    // Unconditionally store a minus sign when producing digits
-    // in a forward direction and increment the pointer only if
-    // the value is in fact negative.
-    // This avoids a conditional branch and is safe because we will
-    // always produce at least one digit and it will overwrite the
-    // minus sign when the value is not negative.
-    *p = '-';
-    p += (mask & 1);
-    p = convert::uitoa(p, u);
-    return p;
-}
-}
+struct pair
+{
+    char dd[2];
+    constexpr pair(char c) : dd{c, '\0'} { } /// NOLINT(google-explicit-constructor)
+    constexpr pair(int n) : dd{"0123456789"[n / 10], "0123456789"[n % 10]} { } /// NOLINT(google-explicit-constructor)
+};
+
+constexpr struct
+{
+    pair dd[100]{
+        0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  //
+        10, 11, 12, 13, 14, 15, 16, 17, 18, 19, //
+        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, //
+        30, 31, 32, 33, 34, 35, 36, 37, 38, 39, //
+        40, 41, 42, 43, 44, 45, 46, 47, 48, 49, //
+        50, 51, 52, 53, 54, 55, 56, 57, 58, 59, //
+        60, 61, 62, 63, 64, 65, 66, 67, 68, 69, //
+        70, 71, 72, 73, 74, 75, 76, 77, 78, 79, //
+        80, 81, 82, 83, 84, 85, 86, 87, 88, 89, //
+        90, 91, 92, 93, 94, 95, 96, 97, 98, 99, //
+    };
+    pair fd[100]{
+        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', //
+        10, 11, 12, 13, 14, 15, 16, 17, 18, 19, //
+        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, //
+        30, 31, 32, 33, 34, 35, 36, 37, 38, 39, //
+        40, 41, 42, 43, 44, 45, 46, 47, 48, 49, //
+        50, 51, 52, 53, 54, 55, 56, 57, 58, 59, //
+        60, 61, 62, 63, 64, 65, 66, 67, 68, 69, //
+        70, 71, 72, 73, 74, 75, 76, 77, 78, 79, //
+        80, 81, 82, 83, 84, 85, 86, 87, 88, 89, //
+        90, 91, 92, 93, 94, 95, 96, 97, 98, 99, //
+    };
+} digits;
+
+constexpr UInt64 mask24 = (UInt64(1) << 24) - 1;
+constexpr UInt64 mask32 = (UInt64(1) << 32) - 1;
+constexpr UInt64 mask57 = (UInt64(1) << 57) - 1;
+
+template <bool, class, class F>
+struct _cond
+{
+    using type = F;
+};
+template <class T, class F>
+struct _cond<true, T, F>
+{
+    using type = T;
+};
+template <bool B, class T, class F>
+using cond = typename _cond<B, T, F>::type;
+
+template <class T>
+inline ALWAYS_INLINE char * to_text_from_integer(char * b, T i)
+{
+    constexpr auto q = sizeof(T);
+    using U = cond<q == 1, char8_t, cond<q <= sizeof(UInt16), UInt16, cond<q <= sizeof(UInt32), UInt32, UInt64>>>;
+
+    // convert bool to int before test with unary + to silence warning if T happens to be bool
+    U const n = +i < 0 ? *b++ = '-', U(0) - U(i) : U(i);
+
+    if (n < U(1e2))
+    {
+        /// This is changed from the original jeaiii implementation
+        /// For small numbers the extra branch to call outOneDigit() is worth it as it saves some instructions
+        /// and a memory access (no need to read digits.fd[n])
+        /// This is not true for pure random numbers, but that's not the common use case of a database
+        /// Original jeaii code
+        // *reinterpret_cast<pair *>(b) = digits.fd[n];
+        // return n < 10 ? b + 1 : b + 2;
+        return n < 10 ? outOneDigit(b, n) : outTwoDigits(b, n);
+    }
+    if (n < UInt32(1e6))
+    {
+        if (sizeof(U) == 1 || n < U(1e4))
+        {
+            auto f0 = UInt32(10 * (1 << 24) / 1e3 + 1) * n;
+            *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 24];
+            if constexpr (sizeof(U) == 1)
+                b -= 1;
+            else
+                b -= n < U(1e3);
+            auto f2 = (f0 & mask24) * 100;
+            *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 24];
+            return b + 4;
+        }
+        auto f0 = UInt64(10 * (1ull << 32ull) / 1e5 + 1) * n;
+        *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 32];
+        if constexpr (sizeof(U) == 2)
+            b -= 1;
+        else
+            b -= n < U(1e5);
+        auto f2 = (f0 & mask32) * 100;
+        *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
+        auto f4 = (f2 & mask32) * 100;
+        *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
+        return b + 6;
+    }
+    if (sizeof(U) == 4 || n < UInt64(1ull << 32ull))
+    {
+        if (n < U(1e8))
+        {
+            auto f0 = UInt64(10 * (1ull << 48ull) / 1e7 + 1) * n >> 16;
+            *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 32];
+            b -= n < U(1e7);
+            auto f2 = (f0 & mask32) * 100;
+            *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
+            auto f4 = (f2 & mask32) * 100;
+            *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
+            auto f6 = (f4 & mask32) * 100;
+            *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 32];
+            return b + 8;
+        }
+        auto f0 = UInt64(10 * (1ull << 57ull) / 1e9 + 1) * n;
+        *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 57];
+        b -= n < UInt32(1e9);
+        auto f2 = (f0 & mask57) * 100;
+        *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 57];
+        auto f4 = (f2 & mask57) * 100;
+        *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 57];
+        auto f6 = (f4 & mask57) * 100;
+        *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 57];
+        auto f8 = (f6 & mask57) * 100;
+        *reinterpret_cast<pair *>(b + 8) = digits.dd[f8 >> 57];
+        return b + 10;
+    }
+
+    // if we get here U must be UInt64 but some compilers don't know that, so reassign n to a UInt64 to avoid warnings
+    UInt32 z = n % UInt32(1e8);
+    UInt64 u = n / UInt32(1e8);
+
+    if (u < UInt32(1e2))
+    {
+        // u can't be 1 digit (if u < 10 it would have been handled above as a 9 digit 32bit number)
+        *reinterpret_cast<pair *>(b) = digits.dd[u];
+        b += 2;
+    }
+    else if (u < UInt32(1e6))
+    {
+        if (u < UInt32(1e4))
+        {
+            auto f0 = UInt32(10 * (1 << 24) / 1e3 + 1) * u;
+            *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 24];
+            b -= u < UInt32(1e3);
+            auto f2 = (f0 & mask24) * 100;
+            *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 24];
+            b += 4;
+        }
+        else
+        {
+            auto f0 = UInt64(10 * (1ull << 32ull) / 1e5 + 1) * u;
+            *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 32];
+            b -= u < UInt32(1e5);
+            auto f2 = (f0 & mask32) * 100;
+            *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
+            auto f4 = (f2 & mask32) * 100;
+            *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
+            b += 6;
+        }
+    }
+    else if (u < UInt32(1e8))
+    {
+        auto f0 = UInt64(10 * (1ull << 48ull) / 1e7 + 1) * u >> 16;
+        *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 32];
+        b -= u < UInt32(1e7);
+        auto f2 = (f0 & mask32) * 100;
+        *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
+        auto f4 = (f2 & mask32) * 100;
+        *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
+        auto f6 = (f4 & mask32) * 100;
+        *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 32];
+        b += 8;
+    }
+    else if (u < UInt64(1ull << 32ull))
+    {
+        auto f0 = UInt64(10 * (1ull << 57ull) / 1e9 + 1) * u;
+        *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 57];
+        b -= u < UInt32(1e9);
+        auto f2 = (f0 & mask57) * 100;
+        *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 57];
+        auto f4 = (f2 & mask57) * 100;
+        *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 57];
+        auto f6 = (f4 & mask57) * 100;
+        *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 57];
+        auto f8 = (f6 & mask57) * 100;
+        *reinterpret_cast<pair *>(b + 8) = digits.dd[f8 >> 57];
+        b += 10;
+    }
+    else
+    {
+        UInt32 y = u % UInt32(1e8);
+        u /= UInt32(1e8);
+
+        // u is 2, 3, or 4 digits (if u < 10 it would have been handled above)
+        if (u < UInt32(1e2))
+        {
+            *reinterpret_cast<pair *>(b) = digits.dd[u];
+            b += 2;
+        }
+        else
+        {
+            auto f0 = UInt32(10 * (1 << 24) / 1e3 + 1) * u;
+            *reinterpret_cast<pair *>(b) = digits.fd[f0 >> 24];
+            b -= u < UInt32(1e3);
+            auto f2 = (f0 & mask24) * 100;
+            *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 24];
+            b += 4;
+        }
+        // do 8 digits
+        auto f0 = (UInt64((1ull << 48ull) / 1e6 + 1) * y >> 16) + 1;
+        *reinterpret_cast<pair *>(b) = digits.dd[f0 >> 32];
+        auto f2 = (f0 & mask32) * 100;
+        *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
+        auto f4 = (f2 & mask32) * 100;
+        *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
+        auto f6 = (f4 & mask32) * 100;
+        *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 32];
+        b += 8;
+    }
+    // do 8 digits
+    auto f0 = (UInt64((1ull << 48ull) / 1e6 + 1) * z >> 16) + 1;
+    *reinterpret_cast<pair *>(b) = digits.dd[f0 >> 32];
+    auto f2 = (f0 & mask32) * 100;
+    *reinterpret_cast<pair *>(b + 2) = digits.dd[f2 >> 32];
+    auto f4 = (f2 & mask32) * 100;
+    *reinterpret_cast<pair *>(b + 4) = digits.dd[f4 >> 32];
+    auto f6 = (f4 & mask32) * 100;
+    *reinterpret_cast<pair *>(b + 6) = digits.dd[f6 >> 32];
+    return b + 8;
+}
+}
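
The core of the jeaiii method added above is a fixed-point multiply: for n < 10^4, f0 = (10 * 2^24 / 10^3 + 1) * n places the top two decimal digits of n in the bits above 24, and multiplying the kept fraction by 100 promotes the next pair into the same position. A standalone sketch of just that trick (write4 is an illustrative name, not part of the patch):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Emits n in [0, 9999] as exactly four digits (with leading zeros).
    // The multiplier 10 * 2^24 / 10^3 + 1 acts as a fixed-point reciprocal
    // of 100: bits >= 24 hold the current digit pair, the rest is fraction.
    static char * write4(char * p, uint32_t n)
    {
        constexpr uint64_t mask24 = (uint64_t(1) << 24) - 1;
        uint64_t f0 = uint64_t(10 * (1 << 24) / 1000 + 1) * n;
        uint32_t hi = uint32_t(f0 >> 24);  // first digit pair
        uint64_t f2 = (f0 & mask24) * 100; // promote the next pair
        uint32_t lo = uint32_t(f2 >> 24);  // second digit pair
        *p++ = char('0' + hi / 10);
        *p++ = char('0' + hi % 10);
        *p++ = char('0' + lo / 10);
        *p++ = char('0' + lo % 10);
        return p;
    }

    int main()
    {
        char buf[5] = {};
        write4(buf, 1234);
        assert(buf[0] == '1' && buf[1] == '2' && buf[2] == '3' && buf[3] == '4');
        puts(buf); // prints 1234
    }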
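The removed convert::itoa relied on the branch-free absolute-value identity its comments describe. A minimal self-check of that identity (branchless_abs is a made-up name used only here):

    #include <cassert>
    #include <cstdint>

    // mask is all-ones for negative i, so (2*u & ~mask) - u evaluates to
    // 2u - u == u when i >= 0, and to 0 - u == -u (mod 2^32) when i < 0,
    // with no branch and no undefined behavior for INT32_MIN.
    static uint32_t branchless_abs(int32_t i)
    {
        uint32_t mask = i < 0 ? ~uint32_t(0) : 0;
        uint32_t u = uint32_t(i);
        return ((2 * u) & ~mask) - u;
    }

    int main()
    {
        assert(branchless_abs(5) == 5);
        assert(branchless_abs(-5) == 5);
        assert(branchless_abs(INT32_MIN) == 2147483648u); // well-defined, unlike std::abs
    }
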
@@ -303,7 +296,7 @@ ALWAYS_INLINE inline char * writeUIntText(UInt128 _x, char * p)
 {
     /// If we the highest 64bit item is empty, we can print just the lowest item as u64
     if (_x.items[UInt128::_impl::little(1)] == 0)
-        return convert::itoa(_x.items[UInt128::_impl::little(0)], p);
+        return jeaiii::to_text_from_integer(p, _x.items[UInt128::_impl::little(0)]);

     /// Doing operations using __int128 is faster and we already rely on this feature
     using T = unsigned __int128;
@@ -334,7 +327,7 @@ ALWAYS_INLINE inline char * writeUIntText(UInt128 _x, char * p)
         current_block += max_multiple_of_hundred_blocks;
     }

-    char * highest_part_print = convert::itoa(uint64_t(x), p);
+    char * highest_part_print = jeaiii::to_text_from_integer(p, uint64_t(x));
     for (int i = 0; i < current_block; i++)
     {
         outTwoDigits(highest_part_print, two_values[current_block - 1 - i]);
@@ -450,12 +443,12 @@ ALWAYS_INLINE inline char * writeSIntText(T x, char * pos)

 char * itoa(UInt8 i, char * p)
 {
-    return convert::itoa(uint8_t(i), p);
+    return jeaiii::to_text_from_integer(p, uint8_t(i));
 }

 char * itoa(Int8 i, char * p)
 {
-    return convert::itoa(int8_t(i), p);
+    return jeaiii::to_text_from_integer(p, int8_t(i));
 }

 char * itoa(UInt128 i, char * p)
@@ -481,7 +474,7 @@ char * itoa(Int256 i, char * p)
 #define DEFAULT_ITOA(T) \
     char * itoa(T i, char * p) \
     { \
-        return convert::itoa(i, p); \
+        return jeaiii::to_text_from_integer(p, i); \
     }

 #define FOR_MISSING_INTEGER_TYPES(M) \
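
A minimal usage sketch of the itoa() entry points kept by this change, assuming the Int64 overload declared alongside the ones above (itoa() returns a pointer one past the last digit written and does not NUL-terminate):

    #include <string>
    #include <base/itoa.h> // assumed location of the declarations above

    std::string toString(Int64 value)
    {
        char buf[32];
        char * end = itoa(value, buf); // e.g. writes "-12345", returns buf + 6
        return std::string(buf, end);
    }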
@@ -232,7 +232,7 @@ void Foundation_API format(
     const Any & value10);


-void Foundation_API format(std::string & result, const std::string & fmt, const std::vector<Any> & values);
+void Foundation_API formatVector(std::string & result, const std::string & fmt, const std::vector<Any> & values);
     /// Supports a variable number of arguments and is used by
     /// all other variants of format().

@@ -21,6 +21,8 @@
 #include <atomic>
 #include <cstddef>
 #include <map>
+#include <memory>
+#include <unordered_map>
 #include <vector>

 #include "Poco/Channel.h"
@@ -19,6 +19,7 @@

 #include <map>
+#include <vector>
 #include "Poco/Foundation.h"
 #include "Poco/Timestamp.h"

@@ -21,6 +21,8 @@
 #include "Poco/AtomicCounter.h"
 #include "Poco/Foundation.h"

+#include <atomic>
+
 namespace Poco
 {
@@ -51,8 +51,8 @@ namespace
     }
     if (width != 0) str.width(width);
 }


 void parsePrec(std::ostream& str, std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
 {
     if (itFmt != endFmt && *itFmt == '.')
@@ -67,7 +67,7 @@ namespace
         if (prec >= 0) str.precision(prec);
     }
 }

 char parseMod(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
 {
     char mod = 0;
@@ -77,13 +77,13 @@ namespace
         {
         case 'l':
         case 'h':
         case 'L':
         case '?': mod = *itFmt++; break;
         }
     }
     return mod;
 }

 std::size_t parseIndex(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
 {
     int index = 0;
@@ -110,8 +110,8 @@ namespace
         case 'f': str << std::fixed; break;
         }
 }


 void writeAnyInt(std::ostream& str, const Any& any)
 {
     if (any.type() == typeid(char))
@@ -201,7 +201,7 @@ namespace
             str << RefAnyCast<std::string>(*itVal++);
             break;
         case 'z':
             str << AnyCast<std::size_t>(*itVal++);
             break;
         case 'I':
         case 'D':
@@ -303,7 +303,7 @@ void format(std::string& result, const std::string& fmt, const Any& value)
 {
     std::vector<Any> args;
     args.push_back(value);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -312,7 +312,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     std::vector<Any> args;
     args.push_back(value1);
     args.push_back(value2);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -322,7 +322,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value1);
     args.push_back(value2);
     args.push_back(value3);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -333,7 +333,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value2);
     args.push_back(value3);
     args.push_back(value4);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -345,7 +345,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value3);
     args.push_back(value4);
     args.push_back(value5);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -358,7 +358,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value4);
     args.push_back(value5);
     args.push_back(value6);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -372,7 +372,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value5);
     args.push_back(value6);
     args.push_back(value7);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -387,7 +387,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value6);
     args.push_back(value7);
     args.push_back(value8);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -403,7 +403,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value7);
     args.push_back(value8);
     args.push_back(value9);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -420,16 +420,16 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value8);
     args.push_back(value9);
     args.push_back(value10);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }


-void format(std::string& result, const std::string& fmt, const std::vector<Any>& values)
+void formatVector(std::string& result, const std::string& fmt, const std::vector<Any>& values)
 {
     std::string::const_iterator itFmt = fmt.begin();
     std::string::const_iterator endFmt = fmt.end();
     std::vector<Any>::const_iterator itVal = values.begin();
     std::vector<Any>::const_iterator endVal = values.end();
     while (itFmt != endFmt)
     {
         switch (*itFmt)
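
The rename matters because Poco::Any is constructible from almost anything, including std::vector<Any> itself, so an overload set containing both format(result, fmt, const Any&) and format(result, fmt, const std::vector<Any>&) is easy to misresolve; presumably that is why the vector overload gets its own name. A hedged sketch of a call site after the rename:

    #include <string>
    #include <vector>
    #include "Poco/Format.h"

    int main()
    {
        std::string result;
        std::vector<Poco::Any> args;
        args.push_back(std::string("ClickHouse"));
        args.push_back(42);
        // Unambiguous: only the vector overload is named formatVector.
        Poco::formatVector(result, std::string("%s: %d"), args); // "ClickHouse: 42"
    }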
@@ -57,7 +57,7 @@ std::string ObjectId::toString(const std::string& fmt) const

     for (int i = 0; i < 12; ++i)
     {
-        s += format(fmt, (unsigned int) _id[i]);
+        s += Poco::format(fmt, (unsigned int) _id[i]);
     }
     return s;
 }
@@ -43,9 +43,9 @@ namespace Poco {
 namespace MongoDB {


-    static const std::string keyCursor {"cursor"};
-    static const std::string keyFirstBatch {"firstBatch"};
-    static const std::string keyNextBatch {"nextBatch"};
+    [[ maybe_unused ]] static const std::string keyCursor {"cursor"};
+    [[ maybe_unused ]] static const std::string keyFirstBatch {"firstBatch"};
+    [[ maybe_unused ]] static const std::string keyNextBatch {"nextBatch"};

     static Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc);

@@ -131,7 +131,7 @@ OpMsgMessage& OpMsgCursor::next(Connection& connection)
         connection.readResponse(_response);
     }
     else
 #endif
     {
         _response.clear();
         _query.setCursor(_cursorID, _batchSize);
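
[[ maybe_unused ]] is needed here because these statics live in a header: every translation unit that includes it gets its own copy of each object, and any TU that never reads them would otherwise warn under -Wunused-const-variable. A minimal illustration (keyDemo is a made-up name):

    #include <string>

    // Header fragment: each including TU gets a private copy of this object.
    // The attribute keeps TUs that never read it free of unused warnings.
    [[ maybe_unused ]] static const std::string keyDemo {"demo"};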
@@ -79,7 +79,7 @@ namespace Net
         /// Returns the value of the first name-value pair with the given name.
         /// If no value with the given name has been found, the defaultValue is returned.

-    const std::vector<std::reference_wrapper<const std::string>> getAll(const std::string & name) const;
+    std::vector<std::string> getAll(const std::string & name) const;
         /// Returns all values of all name-value pairs with the given name.
         ///
         /// Returns an empty vector if there are no name-value pairs with the given name.
@@ -17,9 +17,9 @@
 #include "Poco/NumberFormatter.h"
 #include "Poco/NumberParser.h"
 #include "Poco/String.h"
+#include <charconv>
 #include <format>


 using Poco::NumberFormatter;
 using Poco::NumberParser;
 using Poco::icompare;
@@ -75,7 +75,7 @@ void HTTPMessage::setContentLength(std::streamsize length)
         erase(CONTENT_LENGTH);
 }


 std::streamsize HTTPMessage::getContentLength() const
 {
     const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);
@@ -98,7 +98,7 @@ void HTTPMessage::setContentLength64(Poco::Int64 length)
         erase(CONTENT_LENGTH);
 }


 Poco::Int64 HTTPMessage::getContentLength64() const
 {
     const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);
@@ -133,13 +133,13 @@ void HTTPMessage::setChunkedTransferEncoding(bool flag)
         setTransferEncoding(IDENTITY_TRANSFER_ENCODING);
 }


 bool HTTPMessage::getChunkedTransferEncoding() const
 {
     return icompare(getTransferEncoding(), CHUNKED_TRANSFER_ENCODING) == 0;
 }


 void HTTPMessage::setContentType(const std::string& mediaType)
 {
     if (mediaType.empty())
@@ -154,7 +154,7 @@ void HTTPMessage::setContentType(const MediaType& mediaType)
     setContentType(mediaType.toString());
 }


 const std::string& HTTPMessage::getContentType() const
 {
     return get(CONTENT_TYPE, UNKNOWN_CONTENT_TYPE);
@@ -102,9 +102,9 @@ const std::string& NameValueCollection::get(const std::string& name, const std::
     return defaultValue;
 }

-const std::vector<std::reference_wrapper<const std::string>> NameValueCollection::getAll(const std::string& name) const
+std::vector<std::string> NameValueCollection::getAll(const std::string& name) const
 {
-    std::vector<std::reference_wrapper<const std::string>> values;
+    std::vector<std::string> values;
     for (ConstIterator it = _map.find(name); it != _map.end(); it++)
         if (it->first == name)
             values.push_back(it->second);
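
Returning std::vector<std::string> by value trades a few copies for safety: the old reference_wrapper result dangled as soon as the collection was mutated or destroyed. A usage sketch under that reading:

    #include <iostream>
    #include <string>
    #include <vector>
    #include "Poco/Net/NameValueCollection.h"

    int main()
    {
        Poco::Net::NameValueCollection headers;
        headers.add("Set-Cookie", "a=1");
        headers.add("Set-Cookie", "b=2");
        // The returned vector owns its strings, so it stays valid even
        // after headers is modified or goes out of scope.
        std::vector<std::string> values = headers.getAll("Set-Cookie");
        for (const auto & v : values)
            std::cout << v << '\n';
    }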
@@ -235,8 +235,6 @@ namespace Net
         /// Note that simply closing a socket is not sufficient
         /// to be able to re-use it again.

-        Poco::Timespan getMaxTimeout();
-
     private:
         SecureSocketImpl(const SecureSocketImpl &);
         SecureSocketImpl & operator=(const SecureSocketImpl &);
@@ -250,6 +248,9 @@ namespace Net
         Session::Ptr _pSession;

         friend class SecureStreamSocketImpl;
+
+        Poco::Timespan getMaxTimeoutOrLimit();
+        //// Return max(send, receive) if non zero, otherwise maximum timeout
     };

@@ -199,7 +199,7 @@ void SecureSocketImpl::connectSSL(bool performHandshake)
     if (performHandshake && _pSocket->getBlocking())
     {
         int ret;
-        Poco::Timespan remaining_time = getMaxTimeout();
+        Poco::Timespan remaining_time = getMaxTimeoutOrLimit();
         do
         {
             RemainingTimeCounter counter(remaining_time);
@@ -302,7 +302,7 @@ int SecureSocketImpl::sendBytes(const void* buffer, int length, int flags)
         return rc;
     }

-    Poco::Timespan remaining_time = getMaxTimeout();
+    Poco::Timespan remaining_time = getMaxTimeoutOrLimit();
    do
    {
        RemainingTimeCounter counter(remaining_time);
@@ -338,7 +338,7 @@ int SecureSocketImpl::receiveBytes(void* buffer, int length, int flags)
        return rc;
     }

-    Poco::Timespan remaining_time = getMaxTimeout();
+    Poco::Timespan remaining_time = getMaxTimeoutOrLimit();
    do
    {
        /// SSL record may consist of several TCP packets,
@@ -372,7 +372,7 @@ int SecureSocketImpl::completeHandshake()
     poco_check_ptr (_pSSL);

     int rc;
-    Poco::Timespan remaining_time = getMaxTimeout();
+    Poco::Timespan remaining_time = getMaxTimeoutOrLimit();
     do
     {
         RemainingTimeCounter counter(remaining_time);
@@ -453,18 +453,29 @@ X509* SecureSocketImpl::peerCertificate() const
         return 0;
 }

-Poco::Timespan SecureSocketImpl::getMaxTimeout()
+Poco::Timespan SecureSocketImpl::getMaxTimeoutOrLimit()
 {
     std::lock_guard<std::recursive_mutex> lock(_mutex);
     Poco::Timespan remaining_time = _pSocket->getReceiveTimeout();
     Poco::Timespan send_timeout = _pSocket->getSendTimeout();
     if (remaining_time < send_timeout)
         remaining_time = send_timeout;
+    /// zero SO_SNDTIMEO/SO_RCVTIMEO works as no timeout, let's replicate this
+    ///
+    /// NOTE: we cannot use INT64_MAX (std::numeric_limits<Poco::Timespan::TimeDiff>::max()),
+    /// since it will be later passed to poll() which accept int timeout, and
+    /// even though poll() accepts milliseconds and Timespan() accepts
+    /// microseconds, let's use smaller maximum value just to avoid some possible
+    /// issues, this should be enough anyway (it is ~24 days).
+    if (remaining_time == 0)
+        remaining_time = Poco::Timespan(std::numeric_limits<int>::max());
     return remaining_time;
 }

 bool SecureSocketImpl::mustRetry(int rc, Poco::Timespan& remaining_time)
 {
+    if (remaining_time == 0)
+        return false;
     std::lock_guard<std::recursive_mutex> lock(_mutex);
     if (rc <= 0)
     {
@@ -475,9 +486,7 @@ bool SecureSocketImpl::mustRetry(int rc, Poco::Timespan& remaining_time)
             case SSL_ERROR_WANT_READ:
                 if (_pSocket->getBlocking())
                 {
-                    /// Level-triggered mode of epoll_wait is used, so if SSL_read don't read all available data from socket,
-                    /// epoll_wait returns true without waiting for new data even if remaining_time == 0
-                    if (_pSocket->pollImpl(remaining_time, Poco::Net::Socket::SELECT_READ) && remaining_time != 0)
+                    if (_pSocket->pollImpl(remaining_time, Poco::Net::Socket::SELECT_READ))
                         return true;
                     else
                         throw Poco::TimeoutException();
@@ -486,13 +495,15 @@ bool SecureSocketImpl::mustRetry(int rc, Poco::Timespan& remaining_time)
             case SSL_ERROR_WANT_WRITE:
                 if (_pSocket->getBlocking())
                 {
-                    /// The same as for SSL_ERROR_WANT_READ
-                    if (_pSocket->pollImpl(remaining_time, Poco::Net::Socket::SELECT_WRITE) && remaining_time != 0)
+                    if (_pSocket->pollImpl(remaining_time, Poco::Net::Socket::SELECT_WRITE))
                        return true;
                     else
                         throw Poco::TimeoutException();
                 }
                 break;
+            /// NOTE: POCO_EINTR is the same as SSL_ERROR_WANT_READ (at least in
+            /// OpenSSL), so this likely dead code, but let's leave it for
+            /// compatibility with other implementations
             case SSL_ERROR_SYSCALL:
                 return socketError == POCO_EAGAIN || socketError == POCO_EINTR;
             default:
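
The net effect of getMaxTimeoutOrLimit() plus the early return in mustRetry() is that a zero socket timeout now means "wait with a large finite cap" instead of relying on the poll call's special-casing of zero. A reduced sketch of just the capping rule (effectiveTimeout is an illustrative name, not a Poco API):

    #include <limits>
    #include "Poco/Timespan.h"

    // Zero SO_SNDTIMEO/SO_RCVTIMEO means "no timeout", so replace it with a
    // large finite cap that still fits the int later handed to poll().
    Poco::Timespan effectiveTimeout(const Poco::Timespan & configured)
    {
        if (configured == 0)
            return Poco::Timespan(std::numeric_limits<int>::max());
        return configured;
    }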
@@ -2,11 +2,11 @@

 # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54488)
+SET(VERSION_REVISION 54489)
 SET(VERSION_MAJOR 24)
-SET(VERSION_MINOR 7)
+SET(VERSION_MINOR 8)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH aa023477a9265e403982fca5ee29a714db5133d9)
-SET(VERSION_DESCRIBE v24.7.1.1-testing)
-SET(VERSION_STRING 24.7.1.1)
+SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af)
+SET(VERSION_DESCRIBE v24.8.1.1-testing)
+SET(VERSION_STRING 24.8.1.1)
 # end of autochange
@@ -42,9 +42,19 @@ endif ()
 # But use 2 parallel jobs, since:
 # - this is what llvm does
 # - and I've verfied that lld-11 does not use all available CPU time (in peak) while linking one binary
-if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2)
-    message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
-    set (PARALLEL_LINK_JOBS 2)
+if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO)
+    if (ARCH_AARCH64)
+        # aarch64 builds start to often fail with OOMs (reason not yet clear), for now let's limit the concurrency
+        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.")
+        set (PARALLEL_LINK_JOBS 1)
+        if (LINKER_NAME MATCHES "lld")
+            math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4)
+            set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}")
+        endif()
+    elseif (PARALLEL_LINK_JOBS GREATER 2)
+        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
+        set (PARALLEL_LINK_JOBS 2)
+    endif ()
 endif()

 message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).")
@@ -84,5 +84,5 @@ if (CMAKE_CROSSCOMPILING)
         message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!")
     endif ()

-    message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILE_TARGET}")
+    message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILER_TARGET}")
 endif ()
2 contrib/CMakeLists.txt vendored
@@ -230,6 +230,8 @@ add_contrib (ulid-c-cmake ulid-c)

 add_contrib (libssh-cmake libssh)

+add_contrib (prometheus-protobufs-cmake prometheus-protobufs prometheus-protobufs-gogo)
+
 # Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
 # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
 # in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually,

2 contrib/avro vendored
@@ -1 +1 @@
-Subproject commit d43acc84d3d455b016f847d6666fbc3cd27f16a9
+Subproject commit 545e7002683cbc2198164d93088ac8e4955b4628

@@ -125,7 +125,7 @@ configure_file("${AWS_SDK_CORE_DIR}/include/aws/core/SDKConfig.h.in"
                "${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)

 aws_get_version(AWS_CRT_CPP_VERSION_MAJOR AWS_CRT_CPP_VERSION_MINOR AWS_CRT_CPP_VERSION_PATCH FULL_VERSION GIT_HASH)
-configure_file("${AWS_CRT_DIR}/include/aws/crt/Config.h.in" "${AWS_CRT_DIR}/include/aws/crt/Config.h" @ONLY)
+configure_file("${AWS_CRT_DIR}/include/aws/crt/Config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/include/aws/crt/Config.h" @ONLY)

 list(APPEND AWS_SOURCES ${AWS_SDK_CORE_SRC} ${AWS_SDK_CORE_NET_SRC} ${AWS_SDK_CORE_PLATFORM_SRC})

2 contrib/azure vendored
@@ -1 +1 @@
-Subproject commit 6262a76ef4c4c330c84e58dd4f6f13f4e6230fcd
+Subproject commit ea3e19a7be08519134c643177d56c7484dfec884

@@ -9,6 +9,7 @@ set(DATASKETCHES_LIBRARY theta)
 add_library(_datasketches INTERFACE)
 target_include_directories(_datasketches SYSTEM BEFORE INTERFACE
    "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/common/include"
+   "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/count/include"
    "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/include")

 add_library(ch_contrib::datasketches ALIAS _datasketches)

@@ -157,15 +157,13 @@ function(protobuf_generate)

   set(_generated_srcs_all)
   foreach(_proto ${protobuf_generate_PROTOS})
-    get_filename_component(_abs_file ${_proto} ABSOLUTE)
-    get_filename_component(_abs_dir ${_abs_file} DIRECTORY)
-    get_filename_component(_basename ${_proto} NAME_WE)
-    file(RELATIVE_PATH _rel_dir ${CMAKE_CURRENT_SOURCE_DIR} ${_abs_dir})
-
-    set(_possible_rel_dir)
-    if (NOT protobuf_generate_APPEND_PATH)
-      set(_possible_rel_dir ${_rel_dir}/)
-    endif()
+    # The protobuf compiler doesn't return paths to the files it generates so we have to calculate those paths here:
+    # _abs_file - absolute path to a .proto file,
+    # _possible_rel_dir - relative path to the .proto file from some import directory specified in Protobuf_IMPORT_DIRS,
+    # _basename - filename of the .proto file (without path and without extenstion).
+    get_proto_absolute_path(_abs_file "${_proto}" ${_protobuf_include_path})
+    get_proto_relative_path(_possible_rel_dir "${_abs_file}" ${_protobuf_include_path})
+    get_filename_component(_basename "${_abs_file}" NAME_WE)

     set(_generated_srcs)
     foreach(_ext ${protobuf_generate_GENERATE_EXTENSIONS})
@@ -173,7 +171,7 @@ function(protobuf_generate)
     endforeach()

     if(protobuf_generate_DESCRIPTORS AND protobuf_generate_LANGUAGE STREQUAL cpp)
-      set(_descriptor_file "${CMAKE_CURRENT_BINARY_DIR}/${_basename}.desc")
+      set(_descriptor_file "${protobuf_generate_PROTOC_OUT_DIR}/${_possible_rel_dir}${_basename}.desc")
       set(_dll_desc_out "--descriptor_set_out=${_descriptor_file}")
       list(APPEND _generated_srcs ${_descriptor_file})
     endif()
@@ -196,3 +194,36 @@ function(protobuf_generate)
     target_sources(${protobuf_generate_TARGET} PRIVATE ${_generated_srcs_all})
   endif()
 endfunction()
+
+# Calculates the absolute path to a .proto file.
+function(get_proto_absolute_path result proto)
+  cmake_path(IS_ABSOLUTE proto _is_abs_path)
+  if(_is_abs_path)
+    set(${result} "${proto}" PARENT_SCOPE)
+    return()
+  endif()
+  foreach(_include_dir ${ARGN})
+    if(EXISTS "${_include_dir}/${proto}")
+      set(${result} "${_include_dir}/${proto}" PARENT_SCOPE)
+      return()
+    endif()
+  endforeach()
+  message(SEND_ERROR "Not found protobuf ${proto} in Protobuf_IMPORT_DIRS: ${ARGN}")
+endfunction()
+
+# Calculates a relative path to a .proto file. The returned path is relative to one of include directories.
+function(get_proto_relative_path result abs_path)
+  set(${result} "" PARENT_SCOPE)
+  get_filename_component(_abs_dir "${abs_path}" DIRECTORY)
+  foreach(_include_dir ${ARGN})
+    cmake_path(IS_PREFIX _include_dir "${_abs_dir}" _is_prefix)
+    if(_is_prefix)
+      file(RELATIVE_PATH _rel_dir "${_include_dir}" "${_abs_dir}")
+      if(NOT _rel_dir STREQUAL "")
+        set(${result} "${_rel_dir}/" PARENT_SCOPE)
+      endif()
+      return()
+    endif()
+  endforeach()
+  message(WARNING "Not found protobuf ${abs_path} in Protobuf_IMPORT_DIRS: ${ARGN}")
+endfunction()
2 contrib/grpc vendored
@@ -1 +1 @@
-Subproject commit 77b2737a709d43d8c6895e3f03ca62b00bd9201c
+Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db

@@ -5,7 +5,7 @@ else ()
 endif ()

 if (NOT ENABLE_ICU)
-    message(STATUS "Not using icu")
+    message(STATUS "Not using ICU")
     return()
 endif()

@@ -34,7 +34,7 @@ if (OS_LINUX)
     # avoid spurious latencies and additional work associated with
     # MADV_DONTNEED. See
     # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation.
-    set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
+    set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
 else()
     set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
 endif()
@@ -175,12 +175,19 @@ endif ()

 target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)

-# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
-# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
-# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
+# jemalloc provides support two unwind flavors:
+# - JEMALLOC_PROF_LIBUNWIND - unw_backtrace() - gnu libunwind (compatible with llvm libunwind)
+# - JEMALLOC_PROF_LIBGCC - _Unwind_Backtrace() - the original HP libunwind and the one coming with gcc / g++ / libstdc++.
 #
-# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
-target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
+# But for JEMALLOC_PROF_LIBGCC it also calls _Unwind_Backtrace() during
+# bootstraping of jemalloc, which may lead to deadlock, if the dlsym will do
+# allocations somewhere (like glibc does prio 2.34, see [1]).
+#
+# [1]: https://sourceware.org/git/?p=glibc.git;a=commit;h=fada9018199c21c469ff0e731ef75c6020074ac9
+#
+# And since ClickHouse unwind already supports unw_backtrace() we can safely
+# switch to it to avoid this deadlock.
+target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1)
 target_link_libraries (_jemalloc PRIVATE unwind)

 # for RTLD_NEXT
@@ -4,3 +4,14 @@ It allows to integrate JEMalloc into CMake project.
 - Added JEMALLOC_CONFIG_MALLOC_CONF substitution
 - Add musl support (USE_MUSL)
 - Also note, that darwin build requires JEMALLOC_PREFIX, while others do not
+- JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE should be disabled
+
+  CLOCK_MONOTONIC_COARSE can go backwards after clock_adjtime(ADJ_FREQUENCY)
+  Let's disable it for now, and this menas that CLOCK_MONOTONIC will be used,
+  and this, should not be a problem, since:
+  - jemalloc do not call clock_gettime() that frequently
+  - the difference is CLOCK_MONOTONIC 20ns and CLOCK_MONOTONIC_COARSE 4ns
+
+  This can be done with the following command:
+
+      gg JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE | cut -d: -f1 | xargs sed -i 's@#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE@/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */@'
@@ -96,7 +96,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -96,7 +96,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -99,7 +99,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */

 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
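
With prof:true,prof_active:false in malloc_conf, jemalloc compiles heap profiling in but leaves it dormant; it can then be toggled per process through the mallctl() control tree. A hedged sketch of such a toggle:

    #include <jemalloc/jemalloc.h>

    // Flips the (compiled-in, initially inactive) jemalloc heap profiler
    // on or off at runtime; returns true on success.
    bool setJemallocProfActive(bool active)
    {
        return mallctl("prof.active", nullptr, nullptr, &active, sizeof(active)) == 0;
    }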
@@ -54,7 +54,6 @@ set(SRCS
     "${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c"
     "${LIBPQ_SOURCE_DIR}/port/thread.c"
     "${LIBPQ_SOURCE_DIR}/port/path.c"
-    "${LIBPQ_SOURCE_DIR}/port/explicit_bzero.c"
 )

 add_library(_libpq ${SRCS})

2 contrib/libunwind vendored
@@ -1 +1 @@
-Subproject commit d6a01c46327e56fd86beb8aaa31591fcd9a6b7df
+Subproject commit a89d904befea07814628c6ce0b44083c4e149c62

@@ -4,9 +4,6 @@ set(LIBUNWIND_CXX_SOURCES
     "${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp"
     "${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp"
     "${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp")
-if (APPLE)
-    set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} "${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp")
-endif ()

 set(LIBUNWIND_C_SOURCES
     "${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c"
@@ -32,6 +29,7 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")

 target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
 target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1)
+target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_REMEMBER_STACK_ALLOC=1)
 # NOTE: from this macros sizeof(unw_context_t)/sizeof(unw_cursor_t) is depends, so it should be set always
 target_compile_definitions(unwind PUBLIC -D_LIBUNWIND_IS_NATIVE_ONLY)

2 contrib/llvm-project vendored
@@ -1 +1 @@
-Subproject commit d2142eed98046a47ff7112e3cc1e197c8a5cd80f
+Subproject commit 2a8967b60cbe5bc2df253712bac343cc5263c5fc

2 contrib/mariadb-connector-c vendored
@@ -1 +1 @@
-Subproject commit e39608998f5f6944ece9ec61f48e9172ec1de660
+Subproject commit d0a788c5b9fcaca2368d9233770d3ca91ea79f88

2 contrib/openssl vendored
@@ -1 +1 @@
-Subproject commit 5d81fa7068fc8c07f4d0997d5b703f3c541a637c
+Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
@@ -1298,7 +1298,6 @@ elseif(ARCH_PPC64LE)
     ${OPENSSL_SOURCE_DIR}/crypto/camellia/camellia.c
     ${OPENSSL_SOURCE_DIR}/crypto/camellia/cmll_cbc.c
     ${OPENSSL_SOURCE_DIR}/crypto/chacha/chacha_enc.c
-    ${OPENSSL_SOURCE_DIR}/crypto/mem_clr.c
     ${OPENSSL_SOURCE_DIR}/crypto/rc4/rc4_enc.c
     ${OPENSSL_SOURCE_DIR}/crypto/rc4/rc4_skey.c
     ${OPENSSL_SOURCE_DIR}/crypto/sha/keccak1600.c
2 contrib/orc vendored
@@ -1 +1 @@
-Subproject commit 947cebaf9432d708253ac08dc3012daa6b4ede6f
+Subproject commit bcc025c09828c556f54cfbdf83a66b9acae7d17f
2 contrib/pocketfft vendored
@@ -1 +1 @@
-Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546
+Subproject commit f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3
34 contrib/prometheus-protobufs-cmake/CMakeLists.txt Normal file
@@ -0,0 +1,34 @@
option(ENABLE_PROMETHEUS_PROTOBUFS "Enable Prometheus Protobufs" ${ENABLE_PROTOBUF})

if(NOT ENABLE_PROMETHEUS_PROTOBUFS)
    message(STATUS "Not using prometheus-protobufs")
    return()
endif()

set(Protobuf_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/google-protobuf/src")
set(Prometheus_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/prometheus-protobufs")
set(GogoProto_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/prometheus-protobufs-gogo")

# Protobuf_IMPORT_DIRS specify where the protobuf compiler will look for .proto files.
set(Old_Protobuf_IMPORT_DIRS ${Protobuf_IMPORT_DIRS})
list(APPEND Protobuf_IMPORT_DIRS "${Protobuf_INCLUDE_DIR}" "${Prometheus_INCLUDE_DIR}" "${GogoProto_INCLUDE_DIR}")

PROTOBUF_GENERATE_CPP(prometheus_protobufs_sources prometheus_protobufs_headers
    "prompb/remote.proto"
    "prompb/types.proto"
    "gogoproto/gogo.proto"
)

set(Protobuf_IMPORT_DIRS ${Old_Protobuf_IMPORT_DIRS})

# Ignore warnings while compiling protobuf-generated *.pb.h and *.pb.cpp files.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")

# Disable clang-tidy for protobuf-generated *.pb.h and *.pb.cpp files.
set (CMAKE_CXX_CLANG_TIDY "")

add_library(_prometheus_protobufs ${prometheus_protobufs_sources} ${prometheus_protobufs_headers})
target_include_directories(_prometheus_protobufs SYSTEM PUBLIC "${CMAKE_CURRENT_BINARY_DIR}")
target_link_libraries (_prometheus_protobufs PUBLIC ch_contrib::protobuf)

add_library (ch_contrib::prometheus_protobufs ALIAS _prometheus_protobufs)
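The CMake file just added feeds the three .proto files through protoc and publishes the result as the ch_contrib::prometheus_protobufs target. For orientation, a minimal consumer sketch in C++, assuming the standard protoc-generated header and class names implied by the proto paths above (prompb/types.pb.h and prometheus::Label are assumptions based on default protoc output, not something shown in this commit):

// Hypothetical consumer of ch_contrib::prometheus_protobufs. The include
// resolves because the generated headers land in CMAKE_CURRENT_BINARY_DIR,
// which the target exports via target_include_directories above.
#include <iostream>
#include "prompb/types.pb.h" // assumed generated header name

int main()
{
    prometheus::Label label; // generated from the Label message in prompb/types.proto (added later in this commit)
    label.set_name("__name__");
    label.set_value("http_requests_total");
    std::cout << label.DebugString(); // protobuf's built-in text rendering
    return 0;
}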
35 contrib/prometheus-protobufs-gogo/LICENSE Normal file
@@ -0,0 +1,35 @@
Copyright (c) 2022, The Cosmos SDK Authors. All rights reserved.
Copyright (c) 2013, The GoGo Authors. All rights reserved.

Protocol Buffers for Go with Gadgets

Go support for Protocol Buffers - Google's data interchange format

Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4 contrib/prometheus-protobufs-gogo/README Normal file
@@ -0,0 +1,4 @@
File "gogoproto/gogo.proto" was downloaded from the "Protocol Buffers for Go with Gadgets" project:
https://github.com/cosmos/gogoproto/blob/main/gogoproto/gogo.proto

File "gogoproto/gogo.proto" is used in ClickHouse to compile prometheus protobufs.
145 contrib/prometheus-protobufs-gogo/gogoproto/gogo.proto Normal file
@@ -0,0 +1,145 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/cosmos/gogoproto
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto2";
package gogoproto;

import "google/protobuf/descriptor.proto";

option java_package = "com.google.protobuf";
option java_outer_classname = "GoGoProtos";
option go_package = "github.com/cosmos/gogoproto/gogoproto";

extend google.protobuf.EnumOptions {
  optional bool goproto_enum_prefix = 62001;
  optional bool goproto_enum_stringer = 62021;
  optional bool enum_stringer = 62022;
  optional string enum_customname = 62023;
  optional bool enumdecl = 62024;
}

extend google.protobuf.EnumValueOptions {
  optional string enumvalue_customname = 66001;
}

extend google.protobuf.FileOptions {
  optional bool goproto_getters_all = 63001;
  optional bool goproto_enum_prefix_all = 63002;
  optional bool goproto_stringer_all = 63003;
  optional bool verbose_equal_all = 63004;
  optional bool face_all = 63005;
  optional bool gostring_all = 63006;
  optional bool populate_all = 63007;
  optional bool stringer_all = 63008;
  optional bool onlyone_all = 63009;

  optional bool equal_all = 63013;
  optional bool description_all = 63014;
  optional bool testgen_all = 63015;
  optional bool benchgen_all = 63016;
  optional bool marshaler_all = 63017;
  optional bool unmarshaler_all = 63018;
  optional bool stable_marshaler_all = 63019;

  optional bool sizer_all = 63020;

  optional bool goproto_enum_stringer_all = 63021;
  optional bool enum_stringer_all = 63022;

  optional bool unsafe_marshaler_all = 63023;
  optional bool unsafe_unmarshaler_all = 63024;

  optional bool goproto_extensions_map_all = 63025;
  optional bool goproto_unrecognized_all = 63026;
  optional bool gogoproto_import = 63027;
  optional bool protosizer_all = 63028;
  optional bool compare_all = 63029;
  optional bool typedecl_all = 63030;
  optional bool enumdecl_all = 63031;

  optional bool goproto_registration = 63032;
  optional bool messagename_all = 63033;

  optional bool goproto_sizecache_all = 63034;
  optional bool goproto_unkeyed_all = 63035;
}

extend google.protobuf.MessageOptions {
  optional bool goproto_getters = 64001;
  optional bool goproto_stringer = 64003;
  optional bool verbose_equal = 64004;
  optional bool face = 64005;
  optional bool gostring = 64006;
  optional bool populate = 64007;
  optional bool stringer = 67008;
  optional bool onlyone = 64009;

  optional bool equal = 64013;
  optional bool description = 64014;
  optional bool testgen = 64015;
  optional bool benchgen = 64016;
  optional bool marshaler = 64017;
  optional bool unmarshaler = 64018;
  optional bool stable_marshaler = 64019;

  optional bool sizer = 64020;

  optional bool unsafe_marshaler = 64023;
  optional bool unsafe_unmarshaler = 64024;

  optional bool goproto_extensions_map = 64025;
  optional bool goproto_unrecognized = 64026;

  optional bool protosizer = 64028;
  optional bool compare = 64029;

  optional bool typedecl = 64030;

  optional bool messagename = 64033;

  optional bool goproto_sizecache = 64034;
  optional bool goproto_unkeyed = 64035;
}

extend google.protobuf.FieldOptions {
  optional bool nullable = 65001;
  optional bool embed = 65002;
  optional string customtype = 65003;
  optional string customname = 65004;
  optional string jsontag = 65005;
  optional string moretags = 65006;
  optional string casttype = 65007;
  optional string castkey = 65008;
  optional string castvalue = 65009;

  optional bool stdtime = 65010;
  optional bool stdduration = 65011;
  optional bool wktpointer = 65012;

  optional string castrepeated = 65013;
}
201 contrib/prometheus-protobufs/LICENSE Normal file
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
2 contrib/prometheus-protobufs/README Normal file
@@ -0,0 +1,2 @@
Files "prompb/remote.proto" and "prompb/types.proto" were downloaded from the Prometheus repository:
https://github.com/prometheus/prometheus/tree/main/prompb
88 contrib/prometheus-protobufs/prompb/remote.proto Normal file
@@ -0,0 +1,88 @@
// Copyright 2016 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

import "prompb/types.proto";
import "gogoproto/gogo.proto";

message WriteRequest {
  repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false];
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false];
}

// ReadRequest represents a remote read request.
message ReadRequest {
  repeated Query queries = 1;

  enum ResponseType {
    // Server will return a single ReadResponse message with matched series that includes list of raw samples.
    // It's recommended to use streamed response types instead.
    //
    // Response headers:
    // Content-Type: "application/x-protobuf"
    // Content-Encoding: "snappy"
    SAMPLES = 0;
    // Server will stream a delimited ChunkedReadResponse message that
    // contains XOR or HISTOGRAM(!) encoded chunks for a single series.
    // Each message is following varint size and fixed size bigendian
    // uint32 for CRC32 Castagnoli checksum.
    //
    // Response headers:
    // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
    // Content-Encoding: ""
    STREAMED_XOR_CHUNKS = 1;
  }

  // accepted_response_types allows negotiating the content type of the response.
  //
  // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
  // implemented by server, error is returned.
  // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
  repeated ResponseType accepted_response_types = 2;
}

// ReadResponse is a response when response_type equals SAMPLES.
message ReadResponse {
  // In same order as the request's queries.
  repeated QueryResult results = 1;
}

message Query {
  int64 start_timestamp_ms = 1;
  int64 end_timestamp_ms = 2;
  repeated prometheus.LabelMatcher matchers = 3;
  prometheus.ReadHints hints = 4;
}

message QueryResult {
  // Samples within a time series must be ordered by time.
  repeated prometheus.TimeSeries timeseries = 1;
}

// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
message ChunkedReadResponse {
  repeated prometheus.ChunkedSeries chunked_series = 1;

  // query_index represents an index of the query from ReadRequest.queries these chunks relates to.
  int64 query_index = 2;
}
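To make the WriteRequest layout concrete, here is a hedged C++ sketch that fills in one time series and one sample using the classes protoc would generate from the file above (the generated names are assumptions based on standard protoc C++ output; the snappy compression and HTTP headers documented in the comments are noted but not implemented):

// Sketch: building a remote-write payload from the messages defined above.
// Assumes the protoc-generated header from prompb/remote.proto is on the include path.
#include <string>
#include "prompb/remote.pb.h" // assumed generated header name

std::string makeWriteRequest()
{
    prometheus::WriteRequest request;

    auto & series = *request.add_timeseries();   // TimeSeries, field 1

    auto & name = *series.add_labels();          // labels are required for a valid series
    name.set_name("__name__");
    name.set_value("http_requests_total");

    auto & sample = *series.add_samples();
    sample.set_value(42.0);
    sample.set_timestamp(1700000000000);         // milliseconds since epoch, per the Sample comment

    std::string payload;
    request.SerializeToString(&payload);
    // A real remote-write client would snappy-compress `payload` and POST it
    // with Content-Encoding: snappy, per the response headers documented above.
    return payload;
}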
187 contrib/prometheus-protobufs/prompb/types.proto Normal file
@@ -0,0 +1,187 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

import "gogoproto/gogo.proto";

message MetricMetadata {
  enum MetricType {
    UNKNOWN = 0;
    COUNTER = 1;
    GAUGE = 2;
    HISTOGRAM = 3;
    GAUGEHISTOGRAM = 4;
    SUMMARY = 5;
    INFO = 6;
    STATESET = 7;
  }

  // Represents the metric type, these match the set from Prometheus.
  // Refer to github.com/prometheus/common/model/metadata.go for details.
  MetricType type = 1;
  string metric_family_name = 2;
  string help = 4;
  string unit = 5;
}

message Sample {
  double value = 1;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 2;
}

message Exemplar {
  // Optional, can be empty.
  repeated Label labels = 1 [(gogoproto.nullable) = false];
  double value = 2;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 3;
}

// A native histogram, also known as a sparse histogram.
// Original design doc:
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
// The appendix of this design doc also explains the concept of float
// histograms. This Histogram message can represent both, the usual
// integer histogram as well as a float histogram.
message Histogram {
  enum ResetHint {
    UNKNOWN = 0; // Need to test for a counter reset explicitly.
    YES = 1;     // This is the 1st histogram after a counter reset.
    NO = 2;      // There was no counter reset between this and the previous Histogram.
    GAUGE = 3;   // This is a gauge histogram where counter resets don't happen.
  }

  oneof count { // Count of observations in the histogram.
    uint64 count_int = 1;
    double count_float = 2;
  }
  double sum = 3; // Sum of observations in the histogram.
  // The schema defines the bucket schema. Currently, valid numbers
  // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
  // is a bucket boundary in each case, and then each power of two is
  // divided into 2^n logarithmic buckets. Or in other words, each
  // bucket boundary is the previous boundary times 2^(2^-n). In the
  // future, more bucket schemas may be added using numbers < -4 or >
  // 8.
  sint32 schema = 4;
  double zero_threshold = 5; // Breadth of the zero bucket.
  oneof zero_count { // Count in zero bucket.
    uint64 zero_count_int = 6;
    double zero_count_float = 7;
  }

  // Negative Buckets.
  repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
  // Use either "negative_deltas" or "negative_counts", the former for
  // regular histograms with integer counts, the latter for float
  // histograms.
  repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double negative_counts = 10; // Absolute count of each bucket.

  // Positive Buckets.
  repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
  // Use either "positive_deltas" or "positive_counts", the former for
  // regular histograms with integer counts, the latter for float
  // histograms.
  repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double positive_counts = 13; // Absolute count of each bucket.

  ResetHint reset_hint = 14;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 15;
}

// A BucketSpan defines a number of consecutive buckets with their
// offset. Logically, it would be more straightforward to include the
// bucket counts in the Span. However, the protobuf representation is
// more compact in the way the data is structured here (with all the
// buckets in a single array separate from the Spans).
message BucketSpan {
  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
  uint32 length = 2; // Length of consecutive buckets.
}

// TimeSeries represents samples and labels for a single time series.
message TimeSeries {
  // For a timeseries to be valid, and for the samples and exemplars
  // to be ingested by the remote system properly, the labels field is required.
  repeated Label labels = 1 [(gogoproto.nullable) = false];
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}

message Label {
  string name = 1;
  string value = 2;
}

message Labels {
  repeated Label labels = 1 [(gogoproto.nullable) = false];
}

// Matcher specifies a rule, which can match or set of labels or not.
message LabelMatcher {
  enum Type {
    EQ = 0;
    NEQ = 1;
    RE = 2;
    NRE = 3;
  }
  Type type = 1;
  string name = 2;
  string value = 3;
}

message ReadHints {
  int64 step_ms = 1; // Query step size in milliseconds.
  string func = 2; // String representation of surrounding function or aggregation.
  int64 start_ms = 3; // Start time in milliseconds.
  int64 end_ms = 4; // End time in milliseconds.
  repeated string grouping = 5; // List of label names used in aggregation.
  bool by = 6; // Indicate whether it is without or by.
  int64 range_ms = 7; // Range vector selector range in milliseconds.
}

// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
message Chunk {
  int64 min_time_ms = 1;
  int64 max_time_ms = 2;

  // We require this to match chunkenc.Encoding.
  enum Encoding {
    UNKNOWN = 0;
    XOR = 1;
    HISTOGRAM = 2;
    FLOAT_HISTOGRAM = 3;
  }
  Encoding type = 3;
  bytes data = 4;
}

// ChunkedSeries represents single, encoded time series.
message ChunkedSeries {
  // Labels should be sorted.
  repeated Label labels = 1 [(gogoproto.nullable) = false];
  // Chunks will be in start time order and may overlap.
  repeated Chunk chunks = 2 [(gogoproto.nullable) = false];
}
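The bucket-schema comment in Histogram above is easier to see with numbers: for schema n, each bucket boundary is the previous one times 2^(2^-n), so 2^n buckets span each power of two. A small self-contained C++ sketch (no protobuf involved) that prints the boundaries for schema = 3:

// Worked example of the native-histogram bucket rule from the comment above:
// boundary[i+1] = boundary[i] * 2^(2^-schema), starting from 1.0.
#include <cmath>
#include <cstdio>

int main()
{
    const int schema = 3;                       // valid range per the comment: -4..8
    const double factor = std::pow(2.0, std::pow(2.0, -schema)); // 2^(2^-3), about 1.0905
    double boundary = 1.0;                      // 1 is always a bucket boundary
    for (int i = 0; i < 9; ++i)                 // 2^3 = 8 buckets reach the next power of two
    {
        std::printf("boundary %d: %.6f\n", i, boundary);
        boundary *= factor;
    }
    // After 8 steps the boundary is 2.0 (up to floating point), i.e. each
    // power of two is divided into 2^schema logarithmic buckets.
    return 0;
}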
2 contrib/rocksdb vendored
@@ -1 +1 @@
-Subproject commit 3a0b80ca9d6eebb38fad7ea3f41dfc9db4f6a984
+Subproject commit be366233921293bd07a84dc4ea6991858665f202
@ -1,24 +1,17 @@
|
|||||||
option (ENABLE_ROCKSDB "Enable rocksdb library" ${ENABLE_LIBRARIES})
|
option (ENABLE_ROCKSDB "Enable RocksDB" ${ENABLE_LIBRARIES})
|
||||||
|
|
||||||
if (NOT ENABLE_ROCKSDB)
|
if (NOT ENABLE_ROCKSDB)
|
||||||
message (STATUS "Not using rocksdb")
|
message (STATUS "Not using RocksDB")
|
||||||
return()
|
return()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
## this file is extracted from `contrib/rocksdb/CMakeLists.txt`
|
# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
|
||||||
set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
|
|
||||||
list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/")
|
|
||||||
|
|
||||||
set(PORTABLE ON)
|
|
||||||
## always disable jemalloc for rocksdb by default
|
|
||||||
## because it introduces non-standard jemalloc APIs
|
|
||||||
option(WITH_JEMALLOC "build with JeMalloc" OFF)
|
option(WITH_JEMALLOC "build with JeMalloc" OFF)
|
||||||
set(USE_SNAPPY OFF)
|
|
||||||
if (TARGET ch_contrib::snappy)
|
option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING
|
||||||
set(USE_SNAPPY ON)
|
|
||||||
endif()
|
# ClickHouse cannot be compiled without snappy, lz4, zlib, zstd
|
||||||
option(WITH_SNAPPY "build with SNAPPY" ${USE_SNAPPY})
|
option(WITH_SNAPPY "build with SNAPPY" ON)
|
||||||
## lz4, zlib, zstd is enabled in ClickHouse by default
|
|
||||||
option(WITH_LZ4 "build with lz4" ON)
|
option(WITH_LZ4 "build with lz4" ON)
|
||||||
option(WITH_ZLIB "build with zlib" ON)
|
option(WITH_ZLIB "build with zlib" ON)
|
||||||
option(WITH_ZSTD "build with zstd" ON)
|
option(WITH_ZSTD "build with zstd" ON)
|
||||||
@ -26,78 +19,46 @@ option(WITH_ZSTD "build with zstd" ON)
|
|||||||
# third-party/folly is only validated to work on Linux and Windows for now.
|
# third-party/folly is only validated to work on Linux and Windows for now.
|
||||||
# So only turn it on there by default.
|
# So only turn it on there by default.
|
||||||
if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
|
if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
|
||||||
if(MSVC AND MSVC_VERSION LESS 1910)
|
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
|
||||||
# Folly does not compile with MSVC older than VS2017
|
|
||||||
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
|
|
||||||
else()
|
|
||||||
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
|
|
||||||
endif()
|
|
||||||
else()
|
else()
|
||||||
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
|
option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if( NOT DEFINED CMAKE_CXX_STANDARD )
|
if(WITH_SNAPPY)
|
||||||
set(CMAKE_CXX_STANDARD 11)
|
add_definitions(-DSNAPPY)
|
||||||
|
list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(MSVC)
|
if(WITH_ZLIB)
|
||||||
option(WITH_XPRESS "build with windows built in compression" OFF)
|
add_definitions(-DZLIB)
|
||||||
include("${ROCKSDB_SOURCE_DIR}/thirdparty.inc")
|
list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
|
||||||
else()
|
|
||||||
if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
|
|
||||||
# FreeBSD has jemalloc as default malloc
|
|
||||||
# but it does not have all the jemalloc files in include/...
|
|
||||||
set(WITH_JEMALLOC ON)
|
|
||||||
else()
|
|
||||||
if(WITH_JEMALLOC AND TARGET ch_contrib::jemalloc)
|
|
||||||
add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
|
|
||||||
list(APPEND THIRDPARTY_LIBS ch_contrib::jemalloc)
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if(WITH_SNAPPY)
|
|
||||||
add_definitions(-DSNAPPY)
|
|
||||||
list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if(WITH_ZLIB)
|
|
||||||
add_definitions(-DZLIB)
|
|
||||||
list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if(WITH_LZ4)
|
|
||||||
add_definitions(-DLZ4)
|
|
||||||
list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if(WITH_ZSTD)
|
|
||||||
add_definitions(-DZSTD)
|
|
||||||
list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
|
|
||||||
endif()
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
|
if(WITH_LZ4)
|
||||||
if(POWER9)
|
add_definitions(-DLZ4)
|
||||||
set(HAS_POWER9 1)
|
list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
|
||||||
set(HAS_ALTIVEC 1)
|
endif()
|
||||||
else()
|
|
||||||
set(HAS_POWER8 1)
|
|
||||||
set(HAS_ALTIVEC 1)
|
|
||||||
endif(POWER9)
|
|
||||||
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
|
|
||||||
|
|
||||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
|
if(WITH_ZSTD)
|
||||||
set(HAS_ARMV8_CRC 1)
|
add_definitions(-DZSTD)
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
|
list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
|
endif()
|
||||||
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
|
|
||||||
|
|
||||||
|
option(PORTABLE "build a portable binary" ON)
|
||||||
|
|
||||||
if(ENABLE_AVX2 AND ENABLE_PCLMULQDQ)
|
if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
|
||||||
add_definitions(-DHAVE_SSE42)
|
add_definitions(-DHAVE_SSE42)
|
||||||
add_definitions(-DHAVE_PCLMUL)
|
add_definitions(-DHAVE_PCLMUL)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
|
||||||
|
set (HAS_ARMV8_CRC 1)
|
||||||
|
# the original build descriptions set specific flags for ARM. These flags are already subsumed by ClickHouse's general
|
||||||
|
# ARM flags, see cmake/cpu_features.cmake
|
||||||
|
# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
|
||||||
|
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
|
||||||
|
endif()
|
||||||
|
|
||||||
set (HAVE_THREAD_LOCAL 1)
|
set (HAVE_THREAD_LOCAL 1)
|
||||||
if(HAVE_THREAD_LOCAL)
|
if(HAVE_THREAD_LOCAL)
|
||||||
add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
|
add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
|
||||||
@ -107,8 +68,6 @@ if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
|
|||||||
add_definitions(-DOS_MACOSX)
|
add_definitions(-DOS_MACOSX)
|
||||||
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
||||||
add_definitions(-DOS_LINUX)
|
add_definitions(-DOS_LINUX)
|
||||||
elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
|
|
||||||
add_definitions(-DOS_SOLARIS)
|
|
||||||
elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
|
elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
|
||||||
add_definitions(-DOS_FREEBSD)
|
add_definitions(-DOS_FREEBSD)
|
||||||
elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
|
elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
|
||||||
@ -123,12 +82,10 @@ endif()
|
|||||||
|
|
||||||
if (OS_LINUX)
|
if (OS_LINUX)
|
||||||
add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
|
add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
|
||||||
add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
|
|
||||||
add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
|
add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
|
||||||
elseif (OS_FREEBSD)
|
|
||||||
add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
|
||||||
|
|
||||||
include_directories(${ROCKSDB_SOURCE_DIR})
|
include_directories(${ROCKSDB_SOURCE_DIR})
|
||||||
include_directories("${ROCKSDB_SOURCE_DIR}/include")
|
include_directories("${ROCKSDB_SOURCE_DIR}/include")
|
||||||
@ -136,11 +93,11 @@ if(WITH_FOLLY_DISTRIBUTED_MUTEX)
|
|||||||
include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
|
include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# Main library source code
|
|
||||||
|
|
||||||
set(SOURCES
|
set(SOURCES
|
||||||
${ROCKSDB_SOURCE_DIR}/cache/cache.cc
|
${ROCKSDB_SOURCE_DIR}/cache/cache.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
|
${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
|
${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
|
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
|
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
|
||||||
@ -156,6 +113,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
|
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
|
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
|
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/db/builder.cc
|
${ROCKSDB_SOURCE_DIR}/db/builder.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/db/c.cc
|
${ROCKSDB_SOURCE_DIR}/db/c.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
|
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
|
||||||
@ -229,6 +187,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
|
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
|
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
|
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
|
${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
|
${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/file/file_util.cc
|
${ROCKSDB_SOURCE_DIR}/file/file_util.cc
|
||||||
@ -247,6 +206,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
|
${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
|
${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
|
${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/memory/memory_allocator.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
|
${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
|
${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
|
${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
|
||||||
@ -322,6 +282,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
|
${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
|
${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
|
${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/table/unique_id.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
|
${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
|
${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
|
${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
|
||||||
@ -333,9 +294,12 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
|
${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
|
${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
|
${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
|
|
||||||
${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
|
${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
|
${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_handler.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/coding.cc
|
${ROCKSDB_SOURCE_DIR}/util/coding.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
|
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
|
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
|
||||||
@ -347,6 +311,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
|
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/random.cc
|
${ROCKSDB_SOURCE_DIR}/util/random.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
|
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/util/regex.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
|
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/slice.cc
|
${ROCKSDB_SOURCE_DIR}/util/slice.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
|
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
|
||||||
@ -362,18 +327,23 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load_impl.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
|
     ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
+    ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_secondary_cache.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
+    ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
@@ -393,6 +363,7 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
+    ${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
@@ -411,6 +382,7 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
+    ${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
@@ -425,7 +397,7 @@ set(SOURCES
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
     ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
-    rocksdb_build_version.cc)
+    build_version.cc) # generated by hand

 if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
     set_source_files_properties(
@@ -462,5 +434,6 @@ endif()
 add_library(_rocksdb ${SOURCES})
 add_library(ch_contrib::rocksdb ALIAS _rocksdb)
 target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+
 # SYSTEM is required to overcome some issues
 target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")
contrib/s2geometry (vendored)
@@ -1 +1 @@
-Subproject commit 0547c38371777a1c1c8be263a6f05c3bf71bb05b
+Subproject commit 6522a40338d58752c2a4227a3fc2bc4107c73e43
@@ -1,7 +1,7 @@
-option(ENABLE_S2_GEOMETRY "Enable S2 geometry library" ${ENABLE_LIBRARIES})
+option(ENABLE_S2_GEOMETRY "Enable S2 Geometry" ${ENABLE_LIBRARIES})
 
 if (NOT ENABLE_S2_GEOMETRY)
-    message(STATUS "Not using S2 geometry")
+    message(STATUS "Not using S2 Geometry")
     return()
 endif()
 
@@ -38,6 +38,7 @@ set(S2_SRCS
     "${S2_SOURCE_DIR}/s2/s2cell_index.cc"
     "${S2_SOURCE_DIR}/s2/s2cell_union.cc"
     "${S2_SOURCE_DIR}/s2/s2centroids.cc"
+    "${S2_SOURCE_DIR}/s2/s2chain_interpolation_query.cc"
     "${S2_SOURCE_DIR}/s2/s2closest_cell_query.cc"
     "${S2_SOURCE_DIR}/s2/s2closest_edge_query.cc"
     "${S2_SOURCE_DIR}/s2/s2closest_point_query.cc"
@@ -46,6 +47,7 @@ set(S2_SRCS
     "${S2_SOURCE_DIR}/s2/s2coords.cc"
     "${S2_SOURCE_DIR}/s2/s2crossing_edge_query.cc"
     "${S2_SOURCE_DIR}/s2/s2debug.cc"
+    "${S2_SOURCE_DIR}/s2/s2density_tree.cc"
     "${S2_SOURCE_DIR}/s2/s2earth.cc"
     "${S2_SOURCE_DIR}/s2/s2edge_clipping.cc"
     "${S2_SOURCE_DIR}/s2/s2edge_crosser.cc"
@@ -53,8 +55,10 @@ set(S2_SRCS
     "${S2_SOURCE_DIR}/s2/s2edge_distances.cc"
     "${S2_SOURCE_DIR}/s2/s2edge_tessellator.cc"
     "${S2_SOURCE_DIR}/s2/s2error.cc"
+    "${S2_SOURCE_DIR}/s2/s2fractal.cc"
     "${S2_SOURCE_DIR}/s2/s2furthest_edge_query.cc"
     "${S2_SOURCE_DIR}/s2/s2hausdorff_distance_query.cc"
+    "${S2_SOURCE_DIR}/s2/s2index_cell_data.cc"
     "${S2_SOURCE_DIR}/s2/s2latlng.cc"
     "${S2_SOURCE_DIR}/s2/s2latlng_rect.cc"
     "${S2_SOURCE_DIR}/s2/s2latlng_rect_bounder.cc"
@@ -63,10 +67,10 @@ set(S2_SRCS
     "${S2_SOURCE_DIR}/s2/s2lax_polyline_shape.cc"
     "${S2_SOURCE_DIR}/s2/s2loop.cc"
     "${S2_SOURCE_DIR}/s2/s2loop_measures.cc"
+    "${S2_SOURCE_DIR}/s2/s2max_distance_targets.cc"
     "${S2_SOURCE_DIR}/s2/s2measures.cc"
     "${S2_SOURCE_DIR}/s2/s2memory_tracker.cc"
     "${S2_SOURCE_DIR}/s2/s2metrics.cc"
-    "${S2_SOURCE_DIR}/s2/s2max_distance_targets.cc"
     "${S2_SOURCE_DIR}/s2/s2min_distance_targets.cc"
     "${S2_SOURCE_DIR}/s2/s2padded_cell.cc"
     "${S2_SOURCE_DIR}/s2/s2point_compression.cc"
@@ -80,10 +84,11 @@ set(S2_SRCS
     "${S2_SOURCE_DIR}/s2/s2predicates.cc"
     "${S2_SOURCE_DIR}/s2/s2projections.cc"
     "${S2_SOURCE_DIR}/s2/s2r2rect.cc"
-    "${S2_SOURCE_DIR}/s2/s2region.cc"
-    "${S2_SOURCE_DIR}/s2/s2region_term_indexer.cc"
+    "${S2_SOURCE_DIR}/s2/s2random.cc"
     "${S2_SOURCE_DIR}/s2/s2region_coverer.cc"
     "${S2_SOURCE_DIR}/s2/s2region_intersection.cc"
+    "${S2_SOURCE_DIR}/s2/s2region_sharder.cc"
+    "${S2_SOURCE_DIR}/s2/s2region_term_indexer.cc"
     "${S2_SOURCE_DIR}/s2/s2region_union.cc"
     "${S2_SOURCE_DIR}/s2/s2shape_index.cc"
     "${S2_SOURCE_DIR}/s2/s2shape_index_buffered_region.cc"
@@ -94,9 +99,12 @@ set(S2_SRCS
     "${S2_SOURCE_DIR}/s2/s2shapeutil_coding.cc"
     "${S2_SOURCE_DIR}/s2/s2shapeutil_contains_brute_force.cc"
     "${S2_SOURCE_DIR}/s2/s2shapeutil_conversion.cc"
+    "${S2_SOURCE_DIR}/s2/s2shapeutil_count_vertices.cc"
     "${S2_SOURCE_DIR}/s2/s2shapeutil_edge_iterator.cc"
+    "${S2_SOURCE_DIR}/s2/s2shapeutil_edge_wrap.cc"
     "${S2_SOURCE_DIR}/s2/s2shapeutil_get_reference_point.cc"
     "${S2_SOURCE_DIR}/s2/s2shapeutil_visit_crossing_edge_pairs.cc"
+    "${S2_SOURCE_DIR}/s2/s2testing.cc"
     "${S2_SOURCE_DIR}/s2/s2text_format.cc"
     "${S2_SOURCE_DIR}/s2/s2wedge_relations.cc"
     "${S2_SOURCE_DIR}/s2/s2winding_operation.cc"
@@ -140,6 +148,7 @@ target_link_libraries(_s2 PRIVATE
     absl::strings
     absl::type_traits
     absl::utility
+    absl::vlog_is_on
 )
 
 target_include_directories(_s2 SYSTEM BEFORE PUBLIC "${S2_SOURCE_DIR}/")
contrib/sysroot (vendored)
@@ -1 +1 @@
-Subproject commit 39c4713334f9f156dbf508f548d510d9129a657c
+Subproject commit cc385041b226d1fc28ead14dbab5d40a5f821dd8
contrib/vectorscan (vendored)
@@ -1 +1 @@
-Subproject commit 38431d111781843741a781a57a6381a527d900a4
+Subproject commit d29730e1cb9daaa66bda63426cdce83505d2c809
@@ -1,11 +1,8 @@
-# We use vectorscan, a portable and API/ABI-compatible drop-in replacement for hyperscan.
+# Vectorscan is drop-in replacement for Hyperscan.
 
 if ((ARCH_AMD64 AND NOT NO_SSE3_OR_HIGHER) OR ARCH_AARCH64)
-    option (ENABLE_VECTORSCAN "Enable vectorscan library" ${ENABLE_LIBRARIES})
+    option (ENABLE_VECTORSCAN "Enable vectorscan" ${ENABLE_LIBRARIES})
 endif()
 
-# TODO PPC should generally work but needs manual generation of ppc/config.h file on a PPC machine
-
 if (NOT ENABLE_VECTORSCAN)
     message (STATUS "Not using vectorscan")
     return()
@@ -272,34 +269,24 @@ if (ARCH_AARCH64)
     )
 endif()
 
-# TODO
-# if (ARCH_PPC64LE)
-#     list(APPEND SRCS
-#         "${LIBRARY_DIR}/src/util/supervector/arch/ppc64el/impl.cpp"
-#     )
-# endif()
 
 add_library (_vectorscan ${SRCS})
 
-target_compile_options (_vectorscan PRIVATE
-    -fno-sanitize=undefined # assume the library takes care of itself
-    -O2 -fno-strict-aliasing -fno-omit-frame-pointer -fvisibility=hidden # options from original build system
-)
 # library has too much debug information
 if (OMIT_HEAVY_DEBUG_SYMBOLS)
     target_compile_options (_vectorscan PRIVATE -g0)
 endif()
 
-# Include version header manually generated by running the original build system
-target_include_directories (_vectorscan SYSTEM PRIVATE common)
+target_include_directories (_vectorscan SYSTEM PUBLIC "${LIBRARY_DIR}/src")
+
+# Makes the version header visible. It was generated by running the native build system manually.
+# Please update whenever you update vectorscan.
+target_include_directories (_vectorscan SYSTEM PUBLIC common)
 
 # vectorscan inherited some patched in-source versions of boost headers to fix a bug in
 # boost 1.69. This bug has been solved long ago but vectorscan's source code still
 # points to the patched versions, so include it here.
 target_include_directories (_vectorscan SYSTEM PRIVATE "${LIBRARY_DIR}/include")
 
-target_include_directories (_vectorscan SYSTEM PUBLIC "${LIBRARY_DIR}/src")
-
 # Include platform-specific config header generated by manually running the original build system
 # Please regenerate these files if you update vectorscan.
 
@@ -32,8 +32,12 @@
 /**
  * A version string to identify this release of Hyperscan.
  */
-#define HS_VERSION_STRING "5.4.7 2022-06-20"
+#define HS_VERSION_STRING "5.4.11 2024-07-04"
 
 #define HS_VERSION_32BIT ((5 << 24) | (1 << 16) | (7 << 8) | 0)
 
+#define HS_MAJOR 5
+#define HS_MINOR 4
+#define HS_PATCH 11
+
 #endif /* HS_VERSION_H_C6428FAF8E3713 */
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.5.3.5"
+ARG VERSION="24.6.2.17"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@@ -111,6 +111,7 @@ fi
 mv ./programs/clickhouse* /output || mv ./programs/*_fuzzer /output
 [ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
 [ -x ./programs/self-extracting/clickhouse-stripped ] && mv ./programs/self-extracting/clickhouse-stripped /output
+[ -x ./programs/self-extracting/clickhouse-keeper ] && mv ./programs/self-extracting/clickhouse-keeper /output
 mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
 mv ./programs/*.dict ./programs/*.options ./programs/*_seed_corpus.zip /output ||: # libFuzzer oss-fuzz compatible infrastructure
 
@@ -276,10 +276,7 @@ def parse_env_variables(
     if is_release_build(debug_build, package_type, sanitizer, coverage):
         cmake_flags.append("-DSPLIT_DEBUG_SYMBOLS=ON")
         result.append("WITH_PERFORMANCE=1")
-        if is_cross_arm:
-            cmake_flags.append("-DBUILD_STANDALONE_KEEPER=1")
-        else:
-            result.append("BUILD_MUSL_KEEPER=1")
+        cmake_flags.append("-DBUILD_STANDALONE_KEEPER=1")
     elif package_type == "fuzzers":
         cmake_flags.append("-DENABLE_FUZZING=1")
         cmake_flags.append("-DENABLE_PROTOBUF=1")
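
Read together with the build.sh hunk above, this change means a release build always produces the standalone Keeper (and build.sh now ships its self-extracting binary); the architecture branch and the musl-based BUILD_MUSL_KEEPER path are gone. A minimal Python sketch of the flag logic left behind — the helper name keeper_cmake_flags and the bare is_release flag are illustrative stand-ins, not part of packager.py:

def keeper_cmake_flags(is_release: bool) -> list:
    # Sketch only: condenses the branch this diff leaves behind.
    flags = []
    if is_release:
        flags.append("-DSPLIT_DEBUG_SYMBOLS=ON")
        # Standalone Keeper is now built unconditionally for release builds,
        # on every target architecture.
        flags.append("-DBUILD_STANDALONE_KEEPER=1")
    return flags

assert keeper_cmake_flags(True) == [
    "-DSPLIT_DEBUG_SYMBOLS=ON",
    "-DBUILD_STANDALONE_KEEPER=1",
]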
docker/reqgenerator.py (new file, 47 lines)
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+# To run this script you must install docker and the pipdeptree python package
+#
+
+import subprocess
+import os
+import sys
+
+
+def build_docker_deps(image_name, imagedir):
+    cmd = f"""docker run --entrypoint "/bin/bash" {image_name} -c "pip install pipdeptree 2>/dev/null 1>/dev/null && pipdeptree --freeze --warn silence | sed 's/ \+//g' | sort | uniq" > {imagedir}/requirements.txt"""
+    subprocess.check_call(cmd, shell=True)
+
+
+def check_docker_file_install_with_pip(filepath):
+    image_name = None
+    with open(filepath, "r") as f:
+        for line in f:
+            if "docker build" in line:
+                arr = line.split(" ")
+                if len(arr) > 4:
+                    image_name = arr[4]
+            if "pip3 install" in line or "pip install" in line:
+                return image_name, True
+    return image_name, False
+
+
+def process_affected_images(images_dir):
+    for root, _dirs, files in os.walk(images_dir):
+        for f in files:
+            if f == "Dockerfile":
+                docker_file_path = os.path.join(root, f)
+                print("Checking image on path", docker_file_path)
+                image_name, has_pip = check_docker_file_install_with_pip(
+                    docker_file_path
+                )
+                if has_pip:
+                    print("Found pip in", image_name)
+                    try:
+                        build_docker_deps(image_name, root)
+                    except Exception as ex:
+                        print(ex)
+                else:
+                    print("Pip not found in", docker_file_path)
+
+
+process_affected_images(sys.argv[1])
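
A hedged usage sketch for the script above: pointed at a directory tree, it regenerates a pinned requirements.txt next to every Dockerfile that pip-installs packages, by running pipdeptree inside the corresponding image. The "docker" argument is an assumption based on where this script and the generated requirements.txt files live in this diff:

import subprocess

# Illustrative invocation only; requires a working docker daemon, since
# build_docker_deps() shells out to `docker run`.
subprocess.check_call(["python3", "docker/reqgenerator.py", "docker"])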
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.5.3.5"
+ARG VERSION="24.6.2.17"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.5.3.5"
+ARG VERSION="24.6.2.17"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 #docker-official-library:off
@@ -23,15 +23,17 @@ RUN apt-get update \
 # and MEMORY_LIMIT_EXCEEDED exceptions in Functional tests (total memory limit in Functional tests is ~55.24 GiB).
 # TSAN will flush shadow memory when reaching this limit.
 # It may cause false-negatives, but it's better than OOM.
-RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'" >> /etc/environment
-RUN echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment
-RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment
-RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment
+# max_allocation_size_mb is set to 32GB, so we have much bigger chance to run into memory limit than the limitation of the sanitizers
+RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'" >> /etc/environment
+RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt max_allocation_size_mb=32768'" >> /etc/environment
 # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run")
 # (but w/o verbosity for TSAN, otherwise test.reference will not match)
-ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'
-ENV UBSAN_OPTIONS='print_stacktrace=1'
-ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
+ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768'
+ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'
+ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'
+ENV LSAN_OPTIONS='max_allocation_size_mb=32768'
 
 # for external_symbolizer_path
 RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer
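
The pattern in this hunk is uniform: every sanitizer's option string gains max_allocation_size_mb=32768, so a single oversized allocation fails fast instead of tripping the container's memory limit first. A small Python sketch of that composition — the BASE_OPTIONS table mirrors the Dockerfile above, while with_allocation_cap() is an illustrative helper, not something the repo defines:

# Illustrative only: reproduce the resulting ENV lines from the base options.
BASE_OPTIONS = {
    "TSAN_OPTIONS": "halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1",
    "UBSAN_OPTIONS": "print_stacktrace=1",
    "MSAN_OPTIONS": "abort_on_error=1 poison_in_dtor=1",
    "LSAN_OPTIONS": "",
}

def with_allocation_cap(options: str, cap_mb: int = 32768) -> str:
    # Append the shared allocation cap, handling the empty LSAN case.
    suffix = f"max_allocation_size_mb={cap_mb}"
    return f"{options} {suffix}" if options else suffix

for name, options in BASE_OPTIONS.items():
    print(f"ENV {name}='{with_allocation_cap(options)}'")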
@@ -19,19 +19,20 @@ RUN apt-get update \
         odbcinst \
         psmisc \
         python3 \
-        python3-lxml \
         python3-pip \
-        python3-requests \
-        python3-termcolor \
         unixodbc \
         pv \
         jq \
         zstd \
         --yes --no-install-recommends \
     && apt-get clean \
-    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
+    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \
+    && groupadd --system --gid 1000 clickhouse \
+    && useradd --system --gid 1000 --uid 1000 -m clickhouse
+# ^ For some reason, groupadd and useradd are needed for tests with 'expect', but I don't know, why.
 
-RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3
+COPY requirements.txt /
+RUN pip3 install --no-cache-dir -r /requirements.txt
 
 # This symlink is required by gcc to find the lld linker
 RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
@@ -39,6 +40,10 @@ RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
 # https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
 RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake
 
+# LLVM changes paths for compiler-rt libraries. For some reason clang-18.1.8 cannot catch up libraries from default install path.
+# It's very dirty workaround, better to build compiler and LLVM ourself and use it. Details: https://github.com/llvm/llvm-project/issues/95792
+RUN test ! -d /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu || ln -s /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu /usr/lib/llvm-18/lib/clang/18/lib/x86_64-unknown-linux-gnu
+
 ARG CCACHE_VERSION=4.6.1
 RUN mkdir /tmp/ccache \
     && cd /tmp/ccache \
docker/test/fasttest/requirements.txt (new file, 41 lines)
@@ -0,0 +1,41 @@
+Jinja2==3.1.3
+MarkupSafe==2.1.5
+PyJWT==2.3.0
+PyYAML==6.0.1
+Pygments==2.11.2
+SecretStorage==3.3.1
+blinker==1.4
+certifi==2020.6.20
+chardet==4.0.0
+cryptography==3.4.8
+dbus-python==1.2.18
+distro==1.7.0
+httplib2==0.20.2
+idna==3.3
+importlib-metadata==4.6.4
+jeepney==0.7.1
+keyring==23.5.0
+launchpadlib==1.10.16
+lazr.restfulclient==0.14.4
+lazr.uri==1.0.6
+lxml==4.8.0
+more-itertools==8.10.0
+numpy==1.26.3
+oauthlib==3.2.0
+packaging==24.1
+pandas==1.5.3
+pip==24.1.1
+pipdeptree==2.23.0
+pyparsing==2.4.7
+python-apt==2.4.0+ubuntu3
+python-dateutil==2.9.0.post0
+pytz==2024.1
+requests==2.32.3
+scipy==1.12.0
+setuptools==59.6.0
+six==1.16.0
+termcolor==1.1.0
+urllib3==1.26.5
+wadllib==1.3.6
+wheel==0.37.1
+zipp==1.0.0
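
Since these pins were produced by freezing the image's environment with pipdeptree, a quick way to confirm an image still satisfies them is to diff the file against pip3 freeze inside the container. The sketch below is illustrative only, not part of the repo, and assumes it runs inside the image with the file at a known path:

import subprocess

def unsatisfied_pins(requirements_path: str) -> set:
    # Report pins from requirements.txt that `pip3 freeze` does not list verbatim.
    with open(requirements_path) as f:
        pins = {line.strip() for line in f if line.strip()}
    frozen = set(subprocess.check_output(["pip3", "freeze"], text=True).splitlines())
    return pins - frozen

print("unsatisfied pins:", sorted(unsatisfied_pins("requirements.txt")) or "none")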
@@ -9,7 +9,7 @@ trap 'kill $(jobs -pr) ||:' EXIT
 stage=${stage:-}
 
 # Compiler version, normally set by Dockerfile
-export LLVM_VERSION=${LLVM_VERSION:-17}
+export LLVM_VERSION=${LLVM_VERSION:-18}
 
 # A variable to pass additional flags to CMake.
 # Here we explicitly default it to nothing so that bash doesn't complain about
@@ -84,6 +84,8 @@ function start_server
     echo "ClickHouse server pid '$server_pid' started and responded"
 }
 
+export -f start_server
+
 function clone_root
 {
     [ "$UID" -eq 0 ] && git config --global --add safe.directory "$FASTTEST_SOURCE"
@@ -254,6 +256,22 @@ function configure
     rm -f "$FASTTEST_DATA/config.d/secure_ports.xml"
 }
 
+function timeout_with_logging() {
+    local exit_code=0
+
+    timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
+
+    echo "Checking if it is a timeout. The code 124 will indicate a timeout."
+    if [[ "${exit_code}" -eq "124" ]]
+    then
+        echo "The command 'timeout ${*}' has been killed by timeout."
+    else
+        echo "No, it isn't a timeout."
+    fi
+
+    return $exit_code
+}
+
 function run_tests
 {
     clickhouse-server --version
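
For readers who want the helper's semantics at a glance, here is a rough Python analogue of timeout_with_logging (illustrative only, not part of the repo). It mirrors the bash logic: run the command under GNU timeout and treat exit code 124, the status timeout reports for a hit time limit, as the timeout marker:

import subprocess

def timeout_with_logging(limit: str, *cmd: str) -> int:
    # Mirrors the bash helper above rather than prescribing it.
    exit_code = subprocess.run(
        ["timeout", "-s", "TERM", "--preserve-status", limit, *cmd]
    ).returncode
    print("Checking if it is a timeout. The code 124 will indicate a timeout.")
    if exit_code == 124:
        print(f"The command 'timeout {limit} {' '.join(cmd)}' has been killed by timeout.")
    else:
        print("No, it isn't a timeout.")
    return exit_code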
@ -269,6 +287,11 @@ function run_tests
|
|||||||
NPROC=1
|
NPROC=1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
export CLICKHOUSE_CONFIG_DIR=$FASTTEST_DATA
|
||||||
|
export CLICKHOUSE_CONFIG="$FASTTEST_DATA/config.xml"
|
||||||
|
export CLICKHOUSE_USER_FILES="$FASTTEST_DATA/user_files"
|
||||||
|
export CLICKHOUSE_SCHEMA_FILES="$FASTTEST_DATA/format_schemas"
|
||||||
|
|
||||||
local test_opts=(
|
local test_opts=(
|
||||||
--hung-check
|
--hung-check
|
||||||
--fast-tests-only
|
--fast-tests-only
|
||||||
@ -292,6 +315,8 @@ function run_tests
|
|||||||
clickhouse stop --pid-path "$FASTTEST_DATA"
|
clickhouse stop --pid-path "$FASTTEST_DATA"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export -f run_tests
|
||||||
|
|
||||||
case "$stage" in
|
case "$stage" in
|
||||||
"")
|
"")
|
||||||
ls -la
|
ls -la
|
||||||
@ -315,7 +340,7 @@ case "$stage" in
|
|||||||
configure 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt"
|
configure 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt"
|
||||||
;&
|
;&
|
||||||
"run_tests")
|
"run_tests")
|
||||||
run_tests
|
timeout_with_logging 35m bash -c run_tests ||:
|
||||||
/process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \
|
/process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \
|
||||||
--out-results-file "$FASTTEST_OUTPUT/test_results.tsv" \
|
--out-results-file "$FASTTEST_OUTPUT/test_results.tsv" \
|
||||||
--out-status-file "$FASTTEST_OUTPUT/check_status.tsv" || echo -e "failure\tCannot parse results" > "$FASTTEST_OUTPUT/check_status.tsv"
|
--out-status-file "$FASTTEST_OUTPUT/check_status.tsv" || echo -e "failure\tCannot parse results" > "$FASTTEST_OUTPUT/check_status.tsv"
|
||||||
@@ -31,7 +31,8 @@ RUN apt-get update \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
 
-RUN pip3 install Jinja2
+COPY requirements.txt /
+RUN pip3 install --no-cache-dir -r /requirements.txt
 
 COPY * /
 
@@ -20,7 +20,7 @@
         </max_execution_time>
 
         <max_memory_usage>
-            <max>10G</max>
+            <max>5G</max>
         </max_memory_usage>
 
         <table_function_remote_max_addresses>
Some files were not shown because too many files have changed in this diff.