Merge remote-tracking branch 'origin' into fix-read-settings-in-index-loading

Commit: ecee8721df
@@ -27,6 +27,8 @@ Checks: [
    '-bugprone-not-null-terminated-result',
    '-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
    '-bugprone-unchecked-optional-access',
    '-bugprone-crtp-constructor-accessibility',
    '-bugprone-suspicious-stringview-data-usage',

    '-cert-dcl16-c',
    '-cert-dcl37-c',
@@ -36,6 +38,7 @@ Checks: [
    '-cert-msc51-cpp',
    '-cert-oop54-cpp',
    '-cert-oop57-cpp',
    '-cert-err33-c', # Misreports on clang-19: it warns about all functions containing 'remove' in the name, not only about the standard library.

    '-clang-analyzer-optin.performance.Padding',

@@ -99,6 +102,7 @@ Checks: [
    '-modernize-use-emplace',
    '-modernize-use-nodiscard',
    '-modernize-use-trailing-return-type',
    '-modernize-use-designated-initializers',

    '-performance-enum-size',
    '-performance-inefficient-string-concatenation',
@@ -13,3 +13,6 @@
# dbms/ → src/
# (though it is unlikely that you will see it in blame)
06446b4f08a142d6f1bc30664c47ded88ab51782

# Applied Black formatter for Python code
e6f5a3f98b21ba99cf274a9833797889e020a2b3
.github/actionlint.yml (1 line changed)

@@ -7,3 +7,4 @@ self-hosted-runner:
  - stress-tester
  - style-checker
  - style-checker-aarch64
  - release-maker
.github/actions/check_workflow/action.yml (new file, 21 lines)

@@ -0,0 +1,21 @@
name: CheckWorkflowResults

description: Check overall workflow status and post error to slack if any

inputs:
  needs:
    description: github needs context as a json string
    required: true
    type: string

runs:
  using: "composite"
  steps:
    - name: Check Workflow
      shell: bash
      run: |
        export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
        cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
        ${{ inputs.needs }}
        EOF
        python3 ./tests/ci/ci_buddy.py --check-wf-status
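For reference, a caller hands the whole `needs` context to this composite action as JSON; this is the shape that pull_request.yml adopts further down in this diff:

```yaml
      - name: Check Workflow results
        uses: ./.github/actions/check_workflow
        with:
          needs: ${{ toJson(needs) }}
```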
.github/workflows/auto_release.yml (deleted, 45 lines)

@@ -1,45 +0,0 @@
name: AutoRelease

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1

concurrency:
  group: auto-release
on:  # yamllint disable-line rule:truthy
  # schedule:
  #   - cron: '0 10-16 * * 1-5'
  workflow_dispatch:

jobs:
  CherryPick:
    runs-on: [self-hosted, style-checker-aarch64]
    steps:
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/cherry_pick
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          REPO_OWNER=ClickHouse
          REPO_NAME=ClickHouse
          REPO_TEAM=core
          EOF
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Auto-release
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 auto_release.py --release-after-days=3
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
.github/workflows/backport_branches.yml (24 lines changed)

@@ -36,10 +36,6 @@ jobs:
          cd "$GITHUB_WORKSPACE/tests/ci"
          echo "Testing the main ci directory"
          python3 -m unittest discover -s . -p 'test_*.py'
          for dir in *_lambda/; do
            echo "Testing $dir"
            python3 -m unittest discover -s "$dir" -p 'test_*.py'
          done
      - name: PrepareRunConfig
        id: runconfig
        run: |
@@ -62,7 +58,7 @@ jobs:
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() }}
    uses: ./.github/workflows/reusable_docker.yml
    uses: ./.github/workflows/docker_test_images.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  CompatibilityCheckX86:
@@ -245,8 +241,9 @@ jobs:
      runner_type: stress-tester
      data: ${{ needs.RunConfig.outputs.data }}
  FinishCheck:
    if: ${{ !failure() && !cancelled() }}
    if: ${{ !cancelled() }}
    needs:
      - RunConfig
      - Builds_Report
      - FunctionalStatelessTestAsan
      - FunctionalStatefulTestDebug
@@ -261,10 +258,23 @@ jobs:
        with:
          clear-repository: true
      - name: Finish label
        if: ${{ !failure() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
          EOF
          cd "$GITHUB_WORKSPACE/tests/ci"
          # update mergeable check
          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          python3 merge_pr.py --set-ci-status
          # update overall ci report
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          python3 merge_pr.py
      - name: Check Workflow results
        if: ${{ !cancelled() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
          EOF
          python3 ./tests/ci/ci_buddy.py --check-wf-status
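The "Check Workflow results" step above follows a pattern that recurs in almost every workflow touched by this commit. A minimal sketch of that step in isolation, with comments explaining why the heredoc delimiter is quoted:

```yaml
      - name: Check Workflow results
        if: ${{ !cancelled() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          # The Actions runner substitutes ${{ toJson(needs) }} before bash ever runs,
          # and quoting 'EOF' keeps bash from expanding anything inside that JSON.
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
          EOF
          python3 ./tests/ci/ci_buddy.py --check-wf-status
```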
.github/workflows/create_release.yml (189 lines changed)

@@ -2,12 +2,11 @@ name: CreateRelease
|
||||
|
||||
concurrency:
|
||||
group: release
|
||||
|
||||
'on':
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
sha:
|
||||
description: 'The SHA hash of the commit from which to create the release'
|
||||
ref:
|
||||
description: 'Git reference (branch or commit sha) from which to create the release'
|
||||
required: true
|
||||
type: string
|
||||
type:
|
||||
@ -15,15 +14,189 @@ concurrency:
|
||||
required: true
|
||||
type: choice
|
||||
options:
|
||||
- new
|
||||
- patch
|
||||
- new
|
||||
only-repo:
|
||||
description: 'Run only repos updates including docker (repo-recovery, tests)'
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
dry-run:
|
||||
description: 'Dry run'
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
|
||||
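Assembled from the two hunks above, the dispatch inputs of CreateRelease end up roughly as follows; the description of `type` sits outside the visible hunks and is omitted here, and the option order follows the added lines:

```yaml
'on':
  workflow_dispatch:
    inputs:
      ref:
        description: 'Git reference (branch or commit sha) from which to create the release'
        required: true
        type: string
      type:
        # description not visible in this hunk
        required: true
        type: choice
        options:
          - new
          - patch
      only-repo:
        description: 'Run only repos updates including docker (repo-recovery, tests)'
        required: false
        default: false
        type: boolean
      dry-run:
        description: 'Dry run'
        required: false
        default: false
        type: boolean
```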
jobs:
|
||||
Release:
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
CreateRelease:
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
|
||||
runs-on: [self-hosted, release-maker]
|
||||
steps:
|
||||
- name: DebugInfo
|
||||
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
- name: Print greeting
|
||||
with:
|
||||
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
|
||||
fetch-depth: 0
|
||||
- name: Prepare Release Info
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/release.py --commit ${{ inputs.sha }} --type ${{ inputs.type }} --dry-run
|
||||
if [ ${{ inputs.only-repo }} == "true" ]; then
|
||||
git tag -l ${{ inputs.ref }} || { echo "With only-repo option ref must be a valid release tag"; exit 1; }
|
||||
fi
|
||||
python3 ./tests/ci/create_release.py --prepare-release-info \
|
||||
--ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
|
||||
${{ inputs.dry-run == true && '--dry-run' || '' }} \
|
||||
${{ inputs.only-repo == true && '--skip-tag-check' || '' }}
|
||||
echo "::group::Release Info"
|
||||
python3 -m json.tool /tmp/release_info.json
|
||||
echo "::endgroup::"
|
||||
release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
|
||||
commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
|
||||
is_latest=$(jq -r '.latest' /tmp/release_info.json)
|
||||
echo "Release Tag: $release_tag"
|
||||
echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
|
||||
echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
|
||||
if [ "$is_latest" == "true" ]; then
|
||||
echo "DOCKER_TAG_TYPE=release-latest" >> "$GITHUB_ENV"
|
||||
else
|
||||
echo "DOCKER_TAG_TYPE=release" >> "$GITHUB_ENV"
|
||||
fi
|
||||
- name: Download All Release Artifacts
|
||||
if: ${{ inputs.type == 'patch' }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Push Git Tag for the Release
|
||||
if: ${{ ! inputs.only-repo }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Push New Release Branch
|
||||
if: ${{ inputs.type == 'new' && ! inputs.only-repo }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Bump CH Version and Update Contributors' List
|
||||
if: ${{ ! inputs.only-repo }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Bump Docker versions, Changelog, Security
|
||||
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
|
||||
|
||||
git checkout master # in case WF started from feature branch
|
||||
echo "List versions"
|
||||
./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
|
||||
echo "Update docker version"
|
||||
./utils/list-versions/update-docker-version.sh
|
||||
echo "Generate ChangeLog"
|
||||
export CI=1
|
||||
docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
|
||||
--volume=".:/wd" --workdir="/wd" \
|
||||
clickhouse/style-test \
|
||||
./tests/ci/changelog.py -v --debug-helpers \
|
||||
--jobs=5 \
|
||||
--output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
|
||||
git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
|
||||
echo "Generate Security"
|
||||
python3 ./utils/security-generator/generate_security.py > SECURITY.md
|
||||
git diff HEAD
|
||||
- name: Create ChangeLog PR
|
||||
if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo }}
|
||||
uses: peter-evans/create-pull-request@v6
|
||||
with:
|
||||
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
|
||||
token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
|
||||
committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
|
||||
commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
|
||||
branch: auto/${{ env.RELEASE_TAG }}
|
||||
base: master
|
||||
assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher
|
||||
delete-branch: true
|
||||
title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
|
||||
labels: do not test
|
||||
body: |
|
||||
Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
|
||||
### Changelog category (leave one):
|
||||
- Not for changelog (changelog entry is not required)
|
||||
- name: Complete previous steps and Restore git state
|
||||
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/create_release.py --set-progress-completed
|
||||
git reset --hard HEAD
|
||||
git checkout "$GITHUB_REF_NAME"
|
||||
- name: Create GH Release
|
||||
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Export TGZ Packages
|
||||
if: ${{ inputs.type == 'patch' }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Test TGZ Packages
|
||||
if: ${{ inputs.type == 'patch' }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Export RPM Packages
|
||||
if: ${{ inputs.type == 'patch' }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Test RPM Packages
|
||||
if: ${{ inputs.type == 'patch' }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Export Debian Packages
|
||||
if: ${{ inputs.type == 'patch' }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Test Debian Packages
|
||||
if: ${{ inputs.type == 'patch' }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Docker clickhouse/clickhouse-server building
|
||||
if: ${{ inputs.type == 'patch' }}
|
||||
shell: bash
|
||||
run: |
|
||||
cd "./tests/ci"
|
||||
python3 ./create_release.py --set-progress-started --progress "docker server release"
|
||||
export CHECK_NAME="Docker server image"
|
||||
python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
|
||||
python3 ./create_release.py --set-progress-completed
|
||||
- name: Docker clickhouse/clickhouse-keeper building
|
||||
if: ${{ inputs.type == 'patch' }}
|
||||
shell: bash
|
||||
run: |
|
||||
cd "./tests/ci"
|
||||
python3 ./create_release.py --set-progress-started --progress "docker keeper release"
|
||||
export CHECK_NAME="Docker keeper image"
|
||||
python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
|
||||
python3 ./create_release.py --set-progress-completed
|
||||
- name: Update release info. Merge created PRs
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/create_release.py --merge-prs ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
- name: Set current Release progress to Completed with OK
|
||||
shell: bash
|
||||
run: |
|
||||
# dummy stage to finalize release info with "progress: completed; status: OK"
|
||||
python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
|
||||
python3 ./tests/ci/create_release.py --set-progress-completed
|
||||
- name: Post Slack Message
|
||||
if: ${{ !cancelled() }}
|
||||
shell: bash
|
||||
run: |
|
||||
python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run == true && '--dry-run' || '' }}
|
||||
|
.github/workflows/jepsen.yml (70 lines changed)

@@ -9,19 +9,65 @@ on:  # yamllint disable-line rule:truthy
    - cron: '0 */6 * * *'
  workflow_dispatch:
jobs:
  RunConfig:
    runs-on: [self-hosted, style-checker-aarch64]
    outputs:
      data: ${{ steps.runconfig.outputs.CI_DATA }}
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true # to ensure correct digests
          fetch-depth: 0 # to get version
          filter: tree:0
      - name: PrepareRunConfig
        id: runconfig
        run: |
          echo "::group::configure CI run"
          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow "$GITHUB_WORKFLOW" --outfile ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"

          echo "::group::CI run configure results"
          python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"
          {
            echo 'CI_DATA<<EOF'
            cat ${{ runner.temp }}/ci_run_data.json
            echo 'EOF'
          } >> "$GITHUB_OUTPUT"
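The CI_DATA output produced here is what every downstream job consumes as `data`. A sketch of the producer/consumer pair, extracted from the job definitions around this hunk and annotated:

```yaml
      # Producer: expose the generated JSON as a multi-line step output named CI_DATA.
      # The NAME<<EOF form is GitHub's syntax for multi-line values in $GITHUB_OUTPUT.
      - name: PrepareRunConfig
        id: runconfig
        run: |
          {
            echo 'CI_DATA<<EOF'
            cat ${{ runner.temp }}/ci_run_data.json
            echo 'EOF'
          } >> "$GITHUB_OUTPUT"

  # Consumer: downstream jobs receive the same JSON through the job output "data".
  KeeperJepsenRelease:
    needs: [RunConfig]
    uses: ./.github/workflows/reusable_test.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
```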
  KeeperJepsenRelease:
    uses: ./.github/workflows/reusable_simple_job.yml
    needs: [RunConfig]
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: Jepsen keeper check
      runner_type: style-checker
      report_required: true
      test_name: ClickHouse Keeper Jepsen
      runner_type: style-checker-aarch64
      data: ${{ needs.RunConfig.outputs.data }}
      run_command: |
        python3 jepsen_check.py keeper
  # ServerJepsenRelease:
  #   uses: ./.github/workflows/reusable_simple_job.yml
  #   with:
  #     test_name: Jepsen server check
  #     runner_type: style-checker
  #     run_command: |
  #       cd "$REPO_COPY/tests/ci"
  #       python3 jepsen_check.py server
  ServerJepsenRelease:
    if: false # skip for server
    needs: [RunConfig]
    uses: ./.github/workflows/reusable_test.yml
    with:
      test_name: ClickHouse Server Jepsen
      runner_type: style-checker-aarch64
      data: ${{ needs.RunConfig.outputs.data }}
      run_command: |
        python3 jepsen_check.py server
  CheckWorkflow:
    if: ${{ !cancelled() }}
    needs: [RunConfig, ServerJepsenRelease, KeeperJepsenRelease]
    runs-on: [self-hosted, style-checker-aarch64]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
      - name: Check Workflow results
        if: ${{ !cancelled() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
          EOF
          python3 ./tests/ci/ci_buddy.py --check-wf-status
.github/workflows/master.yml (62 lines changed)

@@ -33,10 +33,6 @@ jobs:
|
||||
# cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
# echo "Testing the main ci directory"
|
||||
# python3 -m unittest discover -s . -p 'test_*.py'
|
||||
# for dir in *_lambda/; do
|
||||
# echo "Testing $dir"
|
||||
# python3 -m unittest discover -s "$dir" -p 'test_*.py'
|
||||
# done
|
||||
- name: PrepareRunConfig
|
||||
id: runconfig
|
||||
run: |
|
||||
@ -58,7 +54,7 @@ jobs:
|
||||
# BuildDockers:
|
||||
# needs: [RunConfig]
|
||||
# if: ${{ !failure() && !cancelled() }}
|
||||
# uses: ./.github/workflows/reusable_docker.yml
|
||||
# uses: ./.github/workflows/docker_test_images.yml
|
||||
# with:
|
||||
# data: ${{ needs.RunConfig.outputs.data }}
|
||||
# StyleCheck:
|
||||
@ -97,21 +93,21 @@ jobs:
|
||||
with:
|
||||
stage: Builds_2
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
Tests_2:
|
||||
Tests_2_ww:
|
||||
needs: [RunConfig, Builds_2]
|
||||
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }}
|
||||
uses: ./.github/workflows/reusable_test_stage.yml
|
||||
with:
|
||||
stage: Tests_2_ww
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
Tests_2:
|
||||
# Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there.
|
||||
needs: [RunConfig, Builds_1]
|
||||
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
|
||||
uses: ./.github/workflows/reusable_test_stage.yml
|
||||
with:
|
||||
stage: Tests_2
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
Tests_3:
|
||||
# Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there.
|
||||
needs: [RunConfig, Builds_1]
|
||||
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
|
||||
uses: ./.github/workflows/reusable_test_stage.yml
|
||||
with:
|
||||
stage: Tests_3
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
|
||||
################################# Reports #################################
|
||||
# Reports should run even if Builds_1/2 fail - run them separately, not in Tests_1/2/3
|
||||
@ -125,37 +121,9 @@ jobs:
|
||||
runner_type: style-checker-aarch64
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
|
||||
MarkReleaseReady:
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
needs: [RunConfig, Builds_1, Builds_2]
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
steps:
|
||||
- name: Debug
|
||||
run: |
|
||||
echo need with different filters
|
||||
cat << 'EOF'
|
||||
${{ toJSON(needs) }}
|
||||
${{ toJSON(needs.*.result) }}
|
||||
no failures ${{ !contains(needs.*.result, 'failure') }}
|
||||
no skips ${{ !contains(needs.*.result, 'skipped') }}
|
||||
no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
|
||||
EOF
|
||||
- name: Not ready
|
||||
# fail the job to be able to restart it
|
||||
if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
|
||||
run: exit 1
|
||||
- name: Check out repository code
|
||||
if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
|
||||
uses: ClickHouse/checkout@v1
|
||||
- name: Mark Commit Release Ready
|
||||
if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 mark_release_ready.py
|
||||
|
||||
FinishCheck:
|
||||
if: ${{ !cancelled() }}
|
||||
needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
|
||||
needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
steps:
|
||||
- name: Check out repository code
|
||||
@ -164,3 +132,11 @@ jobs:
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
|
||||
- name: Check Workflow results
|
||||
if: ${{ !cancelled() }}
|
||||
run: |
|
||||
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
|
||||
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
|
||||
${{ toJson(needs) }}
|
||||
EOF
|
||||
python3 ./tests/ci/ci_buddy.py --check-wf-status
|
||||
|
.github/workflows/merge_queue.yml (23 lines changed)

@@ -30,10 +30,6 @@ jobs:
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
echo "Testing the main ci directory"
|
||||
python3 -m unittest discover -s . -p 'test_*.py'
|
||||
for dir in *_lambda/; do
|
||||
echo "Testing $dir"
|
||||
python3 -m unittest discover -s "$dir" -p 'test_*.py'
|
||||
done
|
||||
- name: PrepareRunConfig
|
||||
id: runconfig
|
||||
run: |
|
||||
@ -51,7 +47,7 @@ jobs:
|
||||
BuildDockers:
|
||||
needs: [RunConfig]
|
||||
if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
|
||||
uses: ./.github/workflows/reusable_docker.yml
|
||||
uses: ./.github/workflows/docker_test_images.yml
|
||||
with:
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
StyleCheck:
|
||||
@ -97,7 +93,7 @@ jobs:
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
|
||||
CheckReadyForMerge:
|
||||
if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
|
||||
if: ${{ !cancelled() }}
|
||||
# Test_2 or Test_3 must not have jobs required for Mergeable check
|
||||
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
@ -105,6 +101,19 @@ jobs:
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
- name: Check and set merge status
|
||||
if: ${{ needs.StyleCheck.result == 'success' }}
|
||||
run: |
|
||||
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
|
||||
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
|
||||
${{ toJson(needs) }}
|
||||
EOF
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
|
||||
python3 merge_pr.py --set-ci-status
|
||||
- name: Check Workflow results
|
||||
if: ${{ !cancelled() }}
|
||||
run: |
|
||||
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
|
||||
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
|
||||
${{ toJson(needs) }}
|
||||
EOF
|
||||
python3 ./tests/ci/ci_buddy.py --check-wf-status
|
||||
|
.github/workflows/nightly.yml (17 lines changed)

@@ -40,7 +40,22 @@ jobs:
        } >> "$GITHUB_OUTPUT"
  BuildDockers:
    needs: [RunConfig]
    uses: ./.github/workflows/reusable_docker.yml
    uses: ./.github/workflows/docker_test_images.yml
    with:
      data: "${{ needs.RunConfig.outputs.data }}"
      set_latest: true
  CheckWorkflow:
    if: ${{ !cancelled() }}
    needs: [RunConfig, BuildDockers]
    runs-on: [self-hosted, style-checker-aarch64]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
      - name: Check Workflow results
        if: ${{ !cancelled() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
          cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
          EOF
          python3 ./tests/ci/ci_buddy.py --check-wf-status
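The same rename appears in every caller of the docker reusable workflow; the nightly call site, shown assembled with an explanatory comment (set_latest is specific to nightly):

```yaml
  BuildDockers:
    needs: [RunConfig]
    # reusable_docker.yml is now docker_test_images.yml; nightly additionally tags the images as latest
    uses: ./.github/workflows/docker_test_images.yml
    with:
      data: "${{ needs.RunConfig.outputs.data }}"
      set_latest: true
```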
.github/workflows/pull_request.yml (53 lines changed)

@@ -48,10 +48,6 @@ jobs:
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
echo "Testing the main ci directory"
|
||||
python3 -m unittest discover -s . -p 'test_*.py'
|
||||
for dir in *_lambda/; do
|
||||
echo "Testing $dir"
|
||||
python3 -m unittest discover -s "$dir" -p 'test_*.py'
|
||||
done
|
||||
- name: PrepareRunConfig
|
||||
id: runconfig
|
||||
run: |
|
||||
@ -72,7 +68,7 @@ jobs:
|
||||
BuildDockers:
|
||||
needs: [RunConfig]
|
||||
if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
|
||||
uses: ./.github/workflows/reusable_docker.yml
|
||||
uses: ./.github/workflows/docker_test_images.yml
|
||||
with:
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
StyleCheck:
|
||||
@ -127,27 +123,32 @@ jobs:
|
||||
stage: Builds_2
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
# stage for running non-required checks without being blocked by required checks (Test_1) if corresponding settings is selected
|
||||
Tests_2:
|
||||
Tests_2_ww:
|
||||
needs: [RunConfig, Builds_1]
|
||||
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }}
|
||||
uses: ./.github/workflows/reusable_test_stage.yml
|
||||
with:
|
||||
stage: Tests_2_ww
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
Tests_2:
|
||||
needs: [RunConfig, Builds_1, Tests_1]
|
||||
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
|
||||
uses: ./.github/workflows/reusable_test_stage.yml
|
||||
with:
|
||||
stage: Tests_2
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
Tests_3:
|
||||
needs: [RunConfig, Builds_1, Tests_1]
|
||||
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
|
||||
uses: ./.github/workflows/reusable_test_stage.yml
|
||||
with:
|
||||
stage: Tests_3
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
|
||||
################################# Reports #################################
|
||||
# Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3)
|
||||
Builds_Report:
|
||||
# run report check for failed builds to indicate the CI error
|
||||
if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
|
||||
needs: [RunConfig, StyleCheck, Builds_1, Builds_2]
|
||||
if: ${{ !cancelled()
|
||||
&& needs.RunConfig.result == 'success'
|
||||
&& needs.StyleCheck.result != 'failure'
|
||||
&& needs.FastTest.result != 'failure'
|
||||
&& needs.BuildDockers.result != 'failure'
|
||||
&& contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
|
||||
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2]
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Builds
|
||||
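Put together, the new gate for Builds_Report reads roughly like this (condensed from the hunk above; the comment is added here for explanation):

```yaml
  Builds_Report:
    # run the report even when builds fail, but not when StyleCheck, FastTest or BuildDockers failed outright
    if: ${{ !cancelled()
      && needs.RunConfig.result == 'success'
      && needs.StyleCheck.result != 'failure'
      && needs.FastTest.result != 'failure'
      && needs.BuildDockers.result != 'failure'
      && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2]
```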
@ -155,9 +156,10 @@ jobs:
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
|
||||
CheckReadyForMerge:
|
||||
if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
|
||||
# Test_2 or Test_3 must not have jobs required for Mergeable check
|
||||
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1]
|
||||
if: ${{ !cancelled() }}
|
||||
# Test_2 or Test_3 do not have the jobs required for Mergeable check,
|
||||
# however, set them as "needs" to get all checks results before the automatic merge occurs.
|
||||
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
steps:
|
||||
- name: Check out repository code
|
||||
@ -165,15 +167,24 @@ jobs:
|
||||
with:
|
||||
filter: tree:0
|
||||
- name: Check and set merge status
|
||||
if: ${{ needs.StyleCheck.result == 'success' }}
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
|
||||
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
|
||||
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
|
||||
${{ toJson(needs) }}
|
||||
EOF
|
||||
python3 merge_pr.py --set-ci-status
|
||||
- name: Check Workflow results
|
||||
uses: ./.github/actions/check_workflow
|
||||
with:
|
||||
needs: ${{ toJson(needs) }}
|
||||
|
||||
################################# Stage Final #################################
|
||||
#
|
||||
FinishCheck:
|
||||
if: ${{ !cancelled() }}
|
||||
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
steps:
|
||||
- name: Check out repository code
|
||||
|
.github/workflows/release.yml (deleted, 69 lines)

@@ -1,69 +0,0 @@
|
||||
name: PublishedReleaseCI
|
||||
# - Gets artifacts from S3
|
||||
# - Sends it to JFROG Artifactory
|
||||
# - Adds them to the release assets
|
||||
|
||||
on: # yamllint disable-line rule:truthy
|
||||
release:
|
||||
types:
|
||||
- published
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: 'Release tag'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
ReleasePublish:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Set tag from input
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
run: |
|
||||
echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
|
||||
- name: Set tag from REF
|
||||
if: github.event_name == 'release'
|
||||
run: |
|
||||
echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
|
||||
- name: Deploy packages and assets
|
||||
run: |
|
||||
curl --silent --data '' --no-buffer \
|
||||
'${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true'
|
||||
############################################################################################
|
||||
##################################### Docker images #######################################
|
||||
############################################################################################
|
||||
DockerServerImages:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Set tag from input
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
run: |
|
||||
echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
|
||||
- name: Set tag from REF
|
||||
if: github.event_name == 'release'
|
||||
run: |
|
||||
echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
fetch-depth: 0 # otherwise we will have no version info
|
||||
filter: tree:0
|
||||
ref: ${{ env.GITHUB_TAG }}
|
||||
- name: Check docker clickhouse/clickhouse-server building
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
export CHECK_NAME="Docker server image"
|
||||
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
|
||||
- name: Check docker clickhouse/clickhouse-keeper building
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
export CHECK_NAME="Docker keeper image"
|
||||
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
.github/workflows/release_branches.yml (22 lines changed)

@@ -33,10 +33,6 @@ jobs:
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
echo "Testing the main ci directory"
|
||||
python3 -m unittest discover -s . -p 'test_*.py'
|
||||
for dir in *_lambda/; do
|
||||
echo "Testing $dir"
|
||||
python3 -m unittest discover -s "$dir" -p 'test_*.py'
|
||||
done
|
||||
- name: PrepareRunConfig
|
||||
id: runconfig
|
||||
run: |
|
||||
@ -57,7 +53,7 @@ jobs:
|
||||
BuildDockers:
|
||||
needs: [RunConfig]
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
uses: ./.github/workflows/reusable_docker.yml
|
||||
uses: ./.github/workflows/docker_test_images.yml
|
||||
with:
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
CompatibilityCheckX86:
|
||||
@ -445,8 +441,9 @@ jobs:
|
||||
runner_type: stress-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
FinishCheck:
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
if: ${{ !cancelled() }}
|
||||
needs:
|
||||
- RunConfig
|
||||
- DockerServerImage
|
||||
- DockerKeeperImage
|
||||
- Builds_Report
|
||||
@ -482,9 +479,16 @@ jobs:
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Finish label
|
||||
if: ${{ !failure() }}
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
# update mergeable check
|
||||
python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
|
||||
# update overall ci report
|
||||
python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
|
||||
- name: Check Workflow results
|
||||
if: ${{ !cancelled() }}
|
||||
run: |
|
||||
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
|
||||
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
|
||||
${{ toJson(needs) }}
|
||||
EOF
|
||||
|
||||
python3 ./tests/ci/ci_buddy.py --check-wf-status
|
||||
|
.github/workflows/reusable_test.yml (2 lines changed)

@@ -102,6 +102,8 @@ jobs:
            --job-name '${{inputs.test_name}}' \
            --run \
            --run-command '''${{inputs.run_command}}'''
          # shellcheck disable=SC2319
          echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV"
      - name: Post run
        if: ${{ !cancelled() }}
        run: |
.github/workflows/tags_stable.yml (deleted, 74 lines)

@@ -1,74 +0,0 @@
|
||||
name: TagsStableWorkflow
|
||||
# - Gets artifacts from S3
|
||||
# - Sends it to JFROG Artifactory
|
||||
# - Adds them to the release assets
|
||||
|
||||
env:
|
||||
# Force the stdout and stderr streams to be unbuffered
|
||||
PYTHONUNBUFFERED: 1
|
||||
|
||||
on: # yamllint disable-line rule:truthy
|
||||
push:
|
||||
tags:
|
||||
- 'v*-prestable'
|
||||
- 'v*-stable'
|
||||
- 'v*-lts'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: 'Test tag'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
|
||||
jobs:
|
||||
UpdateVersions:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Set test tag
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
run: |
|
||||
echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
|
||||
- name: Get tag name
|
||||
if: github.event_name != 'workflow_dispatch'
|
||||
run: |
|
||||
echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
ref: master
|
||||
fetch-depth: 0
|
||||
filter: tree:0
|
||||
- name: Update versions, docker version, changelog, security
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
|
||||
run: |
|
||||
./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
|
||||
./utils/list-versions/update-docker-version.sh
|
||||
GID=$(id -g "${UID}")
|
||||
# --network=host and CI=1 are required for the S3 access from a container
|
||||
docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
|
||||
--volume="${GITHUB_WORKSPACE}:/ClickHouse" clickhouse/style-test \
|
||||
/ClickHouse/tests/ci/changelog.py -v --debug-helpers \
|
||||
--gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
|
||||
--output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
|
||||
git add "./docs/changelogs/${GITHUB_TAG}.md"
|
||||
python3 ./utils/security-generator/generate_security.py > SECURITY.md
|
||||
git diff HEAD
|
||||
- name: Create Pull Request
|
||||
uses: peter-evans/create-pull-request@v6
|
||||
with:
|
||||
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
|
||||
token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
|
||||
committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
|
||||
commit-message: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
|
||||
branch: auto/${{ env.GITHUB_TAG }}
|
||||
assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher
|
||||
delete-branch: true
|
||||
title: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
|
||||
labels: do not test
|
||||
body: |
|
||||
Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
|
||||
|
||||
### Changelog category (leave one):
|
||||
- Not for changelog (changelog entry is not required)
|
.gitmodules (5 lines changed)

@@ -341,7 +341,7 @@
    url = https://github.com/graphitemaster/incbin.git
[submodule "contrib/usearch"]
    path = contrib/usearch
    url = https://github.com/unum-cloud/usearch.git
    url = https://github.com/ClickHouse/usearch.git
[submodule "contrib/SimSIMD"]
    path = contrib/SimSIMD
    url = https://github.com/ashvardanian/SimSIMD.git
@@ -372,3 +372,6 @@
[submodule "contrib/double-conversion"]
    path = contrib/double-conversion
    url = https://github.com/ClickHouse/double-conversion.git
[submodule "contrib/numactl"]
    path = contrib/numactl
    url = https://github.com/ClickHouse/numactl.git
.yamllint (12 lines changed)

@@ -5,12 +5,12 @@ rules:
  indentation:
    level: warning
    indent-sequences: consistent
  line-length:
    # there are:
    #  - bash -c "", so this is OK
    #  - yaml in tests
    max: 1000
    level: warning
  comments:
    min-spaces-from-content: 1
  document-start: disable
  colons: disable
  indentation: disable
  line-length: disable
  trailing-spaces: disable
  truthy: disable
  new-line-at-end-of-file: disable
CHANGELOG.md (168 lines changed)

@@ -1,4 +1,5 @@
|
||||
### Table of Contents
|
||||
**[ClickHouse release v24.7, 2024-07-30](#247)**<br/>
|
||||
**[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
|
||||
**[ClickHouse release v24.5, 2024-05-30](#245)**<br/>
|
||||
**[ClickHouse release v24.4, 2024-04-30](#244)**<br/>
|
||||
@@ -9,6 +10,173 @@
|
||||
|
||||
# 2024 Changelog
|
||||
|
||||
### <a id="247"></a> ClickHouse release 24.7, 2024-07-30
|
||||
|
||||
#### Backward Incompatible Change
|
||||
* Forbid `CREATE MATERIALIZED VIEW ... ENGINE Replicated*MergeTree POPULATE AS SELECT ...` with Replicated databases. [#63963](https://github.com/ClickHouse/ClickHouse/pull/63963) ([vdimir](https://github.com/vdimir)).
|
||||
* `clickhouse-keeper-client` will only accept paths in string literals, such as `ls '/hello/world'`, not bare strings such as `ls /hello/world`. [#65494](https://github.com/ClickHouse/ClickHouse/pull/65494) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
|
||||
#### New Feature
|
||||
* Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
|
||||
* Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||
* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)).
|
||||
* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)).
|
||||
* Support accept_invalid_certificate in client's config in order to allow for client to connect over secure TCP to a server running with self-signed certificate - can be used as a shorthand for corresponding `openSSL` client settings `verificationMode=none` + `invalidCertificateHandler.name=AcceptCertificateHandler`. [#65238](https://github.com/ClickHouse/ClickHouse/pull/65238) ([peacewalker122](https://github.com/peacewalker122)).
|
||||
* Add system.error_log which contains history of error values from table system.errors, periodically flushed to disk. [#65381](https://github.com/ClickHouse/ClickHouse/pull/65381) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||
* Add aggregate function `groupConcat`. About the same as `arrayStringConcat( groupArray(column), ',')` Can receive 2 parameters: a string delimiter and the number of elements to be processed. [#65451](https://github.com/ClickHouse/ClickHouse/pull/65451) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* Add AzureQueue storage. [#65458](https://github.com/ClickHouse/ClickHouse/pull/65458) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Add a new setting to disable/enable writing page index into parquet files. [#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Introduce `logger.console_log_level` server config to control the log level to the console (if enabled). [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Automatically append a wildcard `*` to the end of a directory path with table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)).
|
||||
* Add `--memory-usage` option to client in non-interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)).
|
||||
* Make an interactive client for clickhouse-disks, add local disk from the local directory. [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)).
|
||||
* When lightweight delete happens on a table with projection(s), users have choices either throw an exception (by default) or drop the projection [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)).
|
||||
* Add system tables with main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)).
|
||||
|
||||
#### Experimental Feature
|
||||
* Change binary serialization of the `Variant` data type: add `compact` mode to avoid writing the same discriminator multiple times for granules with single variant or with only NULL values. Add MergeTree setting `use_compact_variant_discriminators_serialization` that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Support on-disk backend storage for clickhouse-keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* Refactor JSONExtract functions, support more types including experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Support null map subcolumn for `Variant` and `Dynamic` subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix reading `Dynamic` subcolumns from altered `Memory` table. Previously if `max_types` parameter of a Dynamic type was changed in Memory table via alter, further subcolumns reading can return wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
|
||||
#### Performance Improvement
|
||||
* Replace int to string algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Sizes of hash tables created by join (`parallel_hash` algorithm) are collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using buffering. It is controlled by setting `read_in_order_use_buffering` (enabled by default) and can increase memory usage of query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Improve performance of loading `plain_rewritable` metadata. [#65634](https://github.com/ClickHouse/ClickHouse/pull/65634) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Attaching tables on read-only disks will use fewer resources by not loading outdated parts. [#65635](https://github.com/ClickHouse/ClickHouse/pull/65635) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Support minmax hyperrectangle for Set indices. [#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)).
|
||||
* Unload primary index of outdated parts to reduce total memory usage. [#65852](https://github.com/ClickHouse/ClickHouse/pull/65852) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Functions `replaceRegexpAll` and `replaceRegexpOne` are now significantly faster if the pattern is trivial, i.e. contains no metacharacters, pattern classes, flags, grouping characters etc. (Thanks to Taiyang Li). [#66185](https://github.com/ClickHouse/ClickHouse/pull/66185) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* s3 requests: Reduce retry time for queries, increase retries count for backups. 8.5 minutes and 100 retries for queries, 1.2 hours and 1000 retries for backup restore. [#65232](https://github.com/ClickHouse/ClickHouse/pull/65232) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Support query plan LIMIT optimization. Support LIMIT pushdown for PostgreSQL storage and table function. [#65454](https://github.com/ClickHouse/ClickHouse/pull/65454) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* DatabaseCatalog drops tables faster by using up to database_catalog_drop_table_concurrency threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
|
||||
#### Improvement
|
||||
* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Support reading partitioned data DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* In composable protocols TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)).
|
||||
* Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)).
|
||||
* `StorageS3Queue` related fixes and improvements. Deduce a default value of `s3queue_processing_threads_num` according to the number of physical cpu cores on the server (instead of the previous default value as 1). Set default value of `s3queue_loading_retries` to 10. Fix possible vague "Uncaught exception" in exception column of `system.s3queue`. Do not increment retry count on `MEMORY_LIMIT_EXCEEDED` exception. Move files commit to a stage after insertion into table fully finished to avoid files being commited while not inserted. Add settings `s3queue_max_processed_files_before_commit`, `s3queue_max_processed_rows_before_commit`, `s3queue_max_processed_bytes_before_commit`, `s3queue_max_processing_time_sec_before_commit`, to better control commit and flush time. [#65046](https://github.com/ClickHouse/ClickHouse/pull/65046) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Support aliases in parametrized view function (only new analyzer). [#65190](https://github.com/ClickHouse/ClickHouse/pull/65190) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Updated to mask account key in logs in azureBlobStorage. [#65273](https://github.com/ClickHouse/ClickHouse/pull/65273) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Partition pruning for `IN` predicates when filter expression is a part of `PARTITION BY` expression. [#65335](https://github.com/ClickHouse/ClickHouse/pull/65335) ([Eduard Karacharov](https://github.com/korowa)).
|
||||
* `arrayMin`/`arrayMax` can be applicable to all data types that are comparable. [#65455](https://github.com/ClickHouse/ClickHouse/pull/65455) ([pn](https://github.com/chloro-pn)).
* Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)).
* Do not create format settings for each row when serializing chunks to insert into an EmbeddedRocksDB table. [#65474](https://github.com/ClickHouse/ClickHouse/pull/65474) ([Duc Canh Le](https://github.com/canhld94)).
* Reduce `clickhouse-local` prompt to just `:)`. `getFQDNOrHostName()` takes too long on macOS, and we don't want a hostname in the prompt for `clickhouse-local` anyway. [#65510](https://github.com/ClickHouse/ClickHouse/pull/65510) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Avoid printing a message from jemalloc about per-CPU arenas on low-end virtual machines. [#65532](https://github.com/ClickHouse/ClickHouse/pull/65532) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable filesystem cache background download by default. It will be enabled again once we fix the issue with possible "Memory limit exceeded" errors, which occur because memory deallocation is done outside the query context (while the buffer is allocated inside the query context) when background download threads are used. We also need to add a separate setting to define the maximum size to download for background workers (currently it is limited by `max_file_segment_size`, which might be too big). [#65534](https://github.com/ClickHouse/ClickHouse/pull/65534) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a new config option `<config_reload_interval_ms>`, which allows specifying how often ClickHouse reloads its configuration. [#65545](https://github.com/ClickHouse/ClickHouse/pull/65545) ([alesapin](https://github.com/alesapin)).
* Implement binary encoding for ClickHouse data types and add its specification to the docs. Use it in Dynamic binary serialization, and allow using it in the RowBinaryWithNamesAndTypes and Native formats under settings. [#65546](https://github.com/ClickHouse/ClickHouse/pull/65546) ([Kruglov Pavel](https://github.com/Avogar)).
* Server settings `compiled_expression_cache_size` and `compiled_expression_cache_elements_size` are now shown in `system.server_settings`. [#65584](https://github.com/ClickHouse/ClickHouse/pull/65584) ([Robert Schulze](https://github.com/rschu1ze)).
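  They can be inspected like any other entry in that table, for example:

  ```sql
  SELECT name, value, description
  FROM system.server_settings
  WHERE name LIKE 'compiled_expression_cache%';
  ```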
* Add support for user identification based on the x509 SubjectAltName extension. [#65626](https://github.com/ClickHouse/ClickHouse/pull/65626) ([Anton Kozlov](https://github.com/tonickkozlov)).
* `clickhouse-local` will respect the `max_server_memory_usage` and `max_server_memory_usage_to_ram_ratio` from the configuration file. It will also set the max memory usage to 90% of the system memory by default, like `clickhouse-server` does. [#65697](https://github.com/ClickHouse/ClickHouse/pull/65697) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a script to back up your files to ClickHouse. [#65699](https://github.com/ClickHouse/ClickHouse/pull/65699) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The PostgreSQL source now supports query cancellation. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)).
* Make `allow_experimental_analyzer` controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed-version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Respect the cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
* Allow calling the `concat` function with no arguments: `:) select concat();`. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([李扬](https://github.com/taiyang-li)).
* Allow controlling named collections in `clickhouse-local`. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve Azure-related profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)).
* Support reading ORC files using the writer's time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)).
* Add settings to control connections to PostgreSQL. The setting `postgresql_connection_attempt_timeout` specifies the value passed to the `connect_timeout` parameter of the connection URL. The setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL endpoint. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)).
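  A sketch of how these could be applied per query; the host, database, and credentials are placeholders, and it is assumed the settings can be supplied at query level like other ClickHouse settings:

  ```sql
  SELECT *
  FROM postgresql('pg-host:5432', 'shop', 'orders', 'reader', 'secret')
  SETTINGS
      postgresql_connection_attempt_timeout = 5,  -- passed to connect_timeout in the connection URL
      postgresql_connection_pool_retries = 2;     -- retries to establish a connection
  ```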
* Reduce inaccuracy of `input_wait_elapsed_us`/`elapsed_us` in the `system.processors_profile_log`. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)).
* Improve ProfileEvents for the filesystem cache. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)).
* Add settings to ignore the `ON CLUSTER` clause in queries for named collection management with the replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Function `generateSnowflakeID` now allows specifying a machine ID as a parameter to prevent collisions in large clusters. [#66374](https://github.com/ClickHouse/ClickHouse/pull/66374) ([ZAWA_ll](https://github.com/Zawa-ll)).
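  A hedged sketch of a call; it assumes the machine ID is passed as an additional argument after the usual optional expression (the expression serves only to keep the two calls from being collapsed into one, and 42 is an arbitrary machine ID):

  ```sql
  SELECT
      generateSnowflakeID(1, 42) AS id1,
      generateSnowflakeID(2, 42) AS id2;
  ```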
* Disable suspending on `Ctrl+Z` in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add an option for validating the primary key type in dictionaries. Without this option, for simple layouts, any column type will be implicitly converted to UInt64. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously, such cyclic dependencies could lead to a deadlock during server startup. Also fix some bugs in dependency creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix unexpected sizes of `LowCardinality` columns in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a crash in `maxIntersections`. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
* Fix the `VALID UNTIL` clause in the user definition resetting after a restart. [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix the remaining time column in `SHOW MERGES`. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* `Query was cancelled` might have been printed twice in clickhouse-client. This behaviour is fixed. [#66005](https://github.com/ClickHouse/ClickHouse/pull/66005) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fixed a crash while using `MaterializedMySQL` (which is an unsupported, experimental feature) with a TABLE OVERRIDE that maps a MySQL NULL field to a ClickHouse non-NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)).
* Fix a logical error when the `PREWHERE` expression reads no columns and the table has no adaptive index granularity (a very old table). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)).
* Fix a bug with the cancellation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)).
* Fix filling parts columns from metadata (when columns.txt does not exist). [#64757](https://github.com/ClickHouse/ClickHouse/pull/64757) ([Azat Khuzhin](https://github.com/azat)).
* Fix a crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
* Fix a crash on destroying AccessControl: add an explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
* Eliminate injective functions in the arguments of `uniq*` functions recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
* Fix an unexpected projection name when a query uses a CTE. [#65267](https://github.com/ClickHouse/ClickHouse/pull/65267) ([wudidapaopao](https://github.com/wudidapaopao)).
* Require the `dictGet` privilege when accessing dictionaries via direct query or the `Dictionary` table engine. [#65359](https://github.com/ClickHouse/ClickHouse/pull/65359) ([Joe Lynch](https://github.com/joelynch)).
* Fix user-specific S3 auth with incremental backups. [#65481](https://github.com/ClickHouse/ClickHouse/pull/65481) ([Antonio Andelic](https://github.com/antonio2368)).
* Disable the `non-intersecting-parts` optimization for queries with `FINAL` when the `read-in-order` optimization is enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` until this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix getting the exception `Index out of bound for blob metadata` when all files from a list batch were filtered out. [#65523](https://github.com/ClickHouse/ClickHouse/pull/65523) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix the `NOT_FOUND_COLUMN_IN_BLOCK` error for deduplicate merges of projections. [#65573](https://github.com/ClickHouse/ClickHouse/pull/65573) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fixed a bug in MergeJoin: a column in sparse serialisation might be treated as a column of its nested type even though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
* Fixed a bug where compatibility level '23.4' was not properly applied. [#65737](https://github.com/ClickHouse/ClickHouse/pull/65737) ([cw5121](https://github.com/cw5121)).
* Fix ODBC tables with nullable fields. [#65738](https://github.com/ClickHouse/ClickHouse/pull/65738) ([Rodolphe Dugé de Bernonville](https://github.com/RodolpheDuge)).
* Fix a data race in `TCPHandler`, which could happen on fatal error. [#65744](https://github.com/ClickHouse/ClickHouse/pull/65744) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix invalid exceptions in function `parseDateTime` with the `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
* For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, the `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix a bug in short-circuit logic when the old analyzer and `dictGetOrDefault` are used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
* Fix a bug that caused EmbeddedRocksDB tables with TTL to write corrupted SST files. [#65816](https://github.com/ClickHouse/ClickHouse/pull/65816) ([Duc Canh Le](https://github.com/canhld94)).
* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out-of-bounds. [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)).
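  An illustrative query (the values are chosen here, not taken from the pull request): a `UInt8` has bits 0–7, so asking for bit 8 is now rejected instead of producing a meaningless result:

  ```sql
  SELECT bitTest(toUInt8(5), 2);  -- returns 1: bit 2 of 0b101 is set
  SELECT bitTest(toUInt8(5), 8);  -- now throws an error: bit index is out of bounds for UInt8
  ```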
* Setting `join_any_take_last_row` is now supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)).
* Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`); fix an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
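  The rewrite in context, with hypothetical tables `t1` and `t2` (the condition shape is taken from the entry above; the table and column names are placeholders):

  ```sql
  -- This NULL-safe join condition ...
  SELECT *
  FROM t1
  JOIN t2 ON (t1.a = t2.b AND t1.a IS NOT NULL AND t2.b IS NOT NULL)
          OR (t1.a IS NULL AND t2.b IS NULL);

  -- ... is now treated as the equivalent null-safe equality:
  SELECT *
  FROM t1
  JOIN t2 ON t1.a <=> t2.b;
  ```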
* Functions `bitShiftLeft` and `bitShiftRight` now return an error for out-of-bounds shift positions. [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)).
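  For reference, the function call looks like this (the values follow the usual documentation-style example and are not data from the fix itself):

  ```sql
  -- arrayAUC(scores, labels): area under the ROC curve.
  SELECT arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]) AS auc;  -- 0.75
  ```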
* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible issues with MySQL client protocol TLS connections. [#65938](https://github.com/ClickHouse/ClickHouse/pull/65938) ([Azat Khuzhin](https://github.com/azat)).
* Fix handling of `SSL_ERROR_WANT_READ`/`SSL_ERROR_WANT_WRITE` with zero timeout. [#65941](https://github.com/ClickHouse/ClickHouse/pull/65941) ([Azat Khuzhin](https://github.com/azat)).
* Add the missing settings `input_format_csv_skip_first_lines`/`input_format_tsv_skip_first_lines`/`input_format_csv_try_infer_numbers_from_strings`/`input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* The `_size` column in the `s3` engine and the `s3` table function now denotes the size of a file inside an archive, not the size of the archive itself. [#65993](https://github.com/ClickHouse/ClickHouse/pull/65993) ([Daniil Ivanik](https://github.com/divanik)).
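  A sketch assuming the `::` path-in-archive syntax of the `s3` function for reading files inside an archive; the bucket and file names are placeholders:

  ```sql
  -- _size now reports the size of each CSV inside the zip, not the size of the zip itself.
  SELECT _file, _size
  FROM s3('https://example-bucket.s3.amazonaws.com/data/batch.zip :: *.csv', 'CSVWithNames');
  ```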
* Fix resolving dynamic subcolumns in the analyzer; avoid reading the whole column when reading a dynamic subcolumn. [#66004](https://github.com/ClickHouse/ClickHouse/pull/66004) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix config merging for `from_env` with replace overrides. [#66034](https://github.com/ClickHouse/ClickHouse/pull/66034) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible hang in `GRPCServer` during shutdown. [#66061](https://github.com/ClickHouse/ClickHouse/pull/66061) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed several cases in function `has` with non-constant `LowCardinality` arguments. [#66088](https://github.com/ClickHouse/ClickHouse/pull/66088) ([Anton Popov](https://github.com/CurtizJ)).
* Fix for `groupArrayIntersect`. It had incorrect behavior in the `merge()` function. Also, fixed behavior in `deserialise()` for numeric and general data. [#66103](https://github.com/ClickHouse/ClickHouse/pull/66103) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fixed a buffer overflow bug in the `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if the optimization merges two filter expressions and does not apply short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed an issue where the server failed to parse Avro files with arrays encoded with a negative block size, which is allowed by the Avro specification. [#66130](https://github.com/ClickHouse/ClickHouse/pull/66130) ([Serge Klochkov](https://github.com/slvrtrn)).
* Fixed a bug in the ZooKeeper client: a session could get stuck in an unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to the "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix an issue in `SumIfToCountIfVisitor` with signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a rare case of missing data in the result of a distributed query. [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
* Fix the order of parsing metadata fields in StorageDeltaLake. [#66211](https://github.com/ClickHouse/ClickHouse/pull/66211) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Don't throw `TIMEOUT_EXCEEDED` for the `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix handling of `LIMIT` for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup, which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix error reporting while copying to S3 or AzureBlobStorage. [#66295](https://github.com/ClickHouse/ClickHouse/pull/66295) ([Vitaly Baranov](https://github.com/vitlibar)).
* Prevent the watchdog from keeping descriptors of unlinked (rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Fix a bug where `LogicalExpressionOptimizerPass` lost the logical type of a constant. [#66344](https://github.com/ClickHouse/ClickHouse/pull/66344) ([pn](https://github.com/chloro-pn)).
* Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and the new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a possible incorrect result for queries joining and filtering tables with an external engine (like PostgreSQL), caused by overly aggressive filter pushdown. From now on, conditions from the WHERE clause won't be sent to the external database in the case of an outer join with an external table. [#66402](https://github.com/ClickHouse/ClickHouse/pull/66402) ([vdimir](https://github.com/vdimir)).
* Added missing column materialization for cross join. [#66413](https://github.com/ClickHouse/ClickHouse/pull/66413) ([lgbo](https://github.com/lgbo-ustc)).
* Fix `Cannot find column` error for queries with a constant expression in the `GROUP BY` key and the new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Avoid a possible logical error during import from the Npy format in the case of a bad array nesting level; fix testing of other kinds of errors. [#66461](https://github.com/ClickHouse/ClickHouse/pull/66461) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix a wrong count() result when there is a non-deterministic function in the predicate. [#66510](https://github.com/ClickHouse/ClickHouse/pull/66510) ([Duc Canh Le](https://github.com/canhld94)).
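  An example of the class of query affected (illustrative only; the table name is a placeholder):

  ```sql
  -- count() must evaluate the non-deterministic predicate row by row
  -- rather than answering from metadata.
  SELECT count()
  FROM events
  WHERE rand() % 2 = 0;
  ```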
* Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix reading of uninitialized memory when hashing empty tuples. [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix `column_length` not being updated in `ColumnTuple::insertManyFrom`. [#66626](https://github.com/ClickHouse/ClickHouse/pull/66626) ([lgbo](https://github.com/lgbo-ustc)).
* Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), only with the analyzer disabled. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `Method getResultType is not supported for QUERY query node` error when a scalar subquery was used as the first argument of `IN` (with the new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a possible `PARAMETER_OUT_OF_BOUND` error when reading a Variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a rare case of a stuck merge after dropping a column. [#66707](https://github.com/ClickHouse/ClickHouse/pull/66707) ([Raúl Marín](https://github.com/Algunenano)).
* Fix the `isUniqTypes` assertion when running `INSERT SELECT` from remote sources. [#66722](https://github.com/ClickHouse/ClickHouse/pull/66722) ([Sema Checherinda](https://github.com/CheSema)).
* Fix a logical error in PrometheusRequestHandler. [#66621](https://github.com/ClickHouse/ClickHouse/pull/66621) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix an `indexHint` function case found by the fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Fix AST formatting of 'create table b empty as a'. [#64951](https://github.com/ClickHouse/ClickHouse/pull/64951) ([Michael Kolupaev](https://github.com/al13n321)).

### <a id="246"></a> ClickHouse release 24.6, 2024-07-01
#### Backward Incompatible Change
@ -187,14 +187,6 @@ else ()
|
||||
set(NO_WHOLE_ARCHIVE --no-whole-archive)
|
||||
endif ()
|
||||
|
||||
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
|
||||
# Can be lld or ld-lld or lld-13 or /path/to/lld.
|
||||
if (LINKER_NAME MATCHES "lld")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index")
|
||||
message (STATUS "Adding .gdb-index via --gdb-index linker option.")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
if (NOT (SANITIZE_COVERAGE OR WITH_COVERAGE)
|
||||
AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE"
|
||||
OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
|
||||
@ -402,7 +394,7 @@ if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
|
||||
set(ENABLE_GWP_ASAN OFF)
|
||||
endif ()
|
||||
|
||||
option (ENABLE_FIU "Enable Fiu" ON)
|
||||
option (ENABLE_LIBFIU "Enable libfiu" ON)
|
||||
|
||||
option(WERROR "Enable -Werror compiler option" ON)
|
||||
|
||||
@ -428,12 +420,17 @@ if (NOT SANITIZE)
|
||||
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
|
||||
endif()
|
||||
|
||||
if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X) AND NOT SANITIZE)
|
||||
# Slightly more efficient code can be generated
|
||||
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
|
||||
if (NOT OS_ANDROID AND OS_LINUX AND NOT ARCH_S390X AND NOT SANITIZE)
|
||||
# Using '-no-pie' builds executables with fixed addresses, resulting in slightly more efficient code
|
||||
# and keeping binary addresses constant even with ASLR enabled.
|
||||
# Disabled on Android as it requires PIE: https://source.android.com/docs/security/enhancements#android-5
|
||||
# Disabled on IBM S390X due to build issues with 'no-pie'
|
||||
# Disabled with sanitizers to avoid issues with maximum relocation size: https://github.com/ClickHouse/ClickHouse/pull/49145
|
||||
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
|
||||
else ()
|
||||
message (WARNING "ClickHouse is built as PIE, system.trace_log will contain invalid addresses after server restart.")
|
||||
endif ()
|
||||
|
||||
if (ENABLE_TESTS)
|
||||
|
10
README.md
@ -34,19 +34,13 @@ curl https://clickhouse.com/ | sh
|
||||
|
||||
Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
|
||||
|
||||
* [v24.6 Community Call](https://clickhouse.com/company/events/v24-6-community-release-call) - Jul 2
|
||||
* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 29
|
||||
|
||||
## Upcoming Events
|
||||
|
||||
Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
|
||||
|
||||
* [AWS Summit in DC](https://clickhouse.com/company/events/2024-06-aws-summit-dc) - Jun 26
|
||||
* [ClickHouse Meetup in Amsterdam](https://www.meetup.com/clickhouse-netherlands-user-group/events/300781068/) - Jun 27
|
||||
* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
|
||||
* [ClickHouse Cloud - Live Update Call](https://clickhouse.com/company/events/202407-cloud-update-live) - Jul 9
|
||||
* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9
|
||||
* [AWS Summit in New York](https://clickhouse.com/company/events/2024-07-awssummit-nyc) - Jul 10
|
||||
* [ClickHouse Meetup @ Klaviyo - Boston](https://www.meetup.com/clickhouse-boston-user-group/events/300907870) - Jul 11
|
||||
* MORE COMING SOON!
|
||||
|
||||
## Recent Recordings
|
||||
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
|
||||
|
@ -14,9 +14,10 @@ The following versions of ClickHouse server are currently supported with securit
|
||||
|
||||
| Version | Supported |
|
||||
|:-|:-|
|
||||
| 24.7 | ✔️ |
|
||||
| 24.6 | ✔️ |
|
||||
| 24.5 | ✔️ |
|
||||
| 24.4 | ✔️ |
|
||||
| 24.4 | ❌ |
|
||||
| 24.3 | ✔️ |
|
||||
| 24.2 | ❌ |
|
||||
| 24.1 | ❌ |
|
||||
|
@ -32,6 +32,7 @@ set (SRCS
|
||||
StringRef.cpp
|
||||
safeExit.cpp
|
||||
throwError.cpp
|
||||
Numa.cpp
|
||||
)
|
||||
|
||||
add_library (common ${SRCS})
|
||||
@ -46,6 +47,10 @@ if (TARGET ch_contrib::crc32_s390x)
|
||||
target_link_libraries(common PUBLIC ch_contrib::crc32_s390x)
|
||||
endif()
|
||||
|
||||
if (TARGET ch_contrib::numactl)
|
||||
target_link_libraries(common PUBLIC ch_contrib::numactl)
|
||||
endif()
|
||||
|
||||
target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")
|
||||
|
||||
target_link_libraries (common
|
||||
|
37
base/base/Numa.cpp
Normal file
@ -0,0 +1,37 @@
|
||||
#include <base/Numa.h>
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#if USE_NUMACTL
|
||||
# include <numa.h>
|
||||
#endif
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
std::optional<size_t> getNumaNodesTotalMemory()
|
||||
{
|
||||
std::optional<size_t> total_memory;
|
||||
#if USE_NUMACTL
|
||||
if (numa_available() != -1)
|
||||
{
|
||||
auto * membind = numa_get_membind();
|
||||
if (!numa_bitmask_equal(membind, numa_all_nodes_ptr))
|
||||
{
|
||||
total_memory.emplace(0);
|
||||
auto max_node = numa_max_node();
|
||||
for (int i = 0; i <= max_node; ++i)
|
||||
{
|
||||
if (numa_bitmask_isbitset(membind, i))
|
||||
*total_memory += numa_node_size(i, nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
numa_bitmask_free(membind);
|
||||
}
|
||||
|
||||
#endif
|
||||
return total_memory;
|
||||
}
|
||||
|
||||
}
|
12
base/base/Numa.h
Normal file
@ -0,0 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include <optional>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/// return total memory of NUMA nodes the process is bound to
|
||||
/// if NUMA is not supported or process can use all nodes, std::nullopt is returned
|
||||
std::optional<size_t> getNumaNodesTotalMemory();
|
||||
|
||||
}
|
@ -3,8 +3,9 @@
|
||||
#include <base/defines.h>
|
||||
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
bool cgroupsV2Enabled()
|
||||
{
|
||||
@ -13,11 +14,11 @@ bool cgroupsV2Enabled()
|
||||
{
|
||||
/// This file exists iff the host has cgroups v2 enabled.
|
||||
auto controllers_file = default_cgroups_mount / "cgroup.controllers";
|
||||
if (!std::filesystem::exists(controllers_file))
|
||||
if (!fs::exists(controllers_file))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
catch (const std::filesystem::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
|
||||
catch (const fs::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
|
||||
{
|
||||
return false; /// not logging the exception as most callers fall back to cgroups v1
|
||||
}
|
||||
@ -33,8 +34,9 @@ bool cgroupsV2MemoryControllerEnabled()
|
||||
/// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
|
||||
/// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
|
||||
/// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
|
||||
std::string cgroup = cgroupV2OfProcess();
|
||||
auto cgroup_dir = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
|
||||
fs::path cgroup_dir = cgroupV2PathOfProcess();
|
||||
if (cgroup_dir.empty())
|
||||
return false;
|
||||
std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
|
||||
if (!controllers_file.is_open())
|
||||
return false;
|
||||
@ -46,7 +48,7 @@ bool cgroupsV2MemoryControllerEnabled()
|
||||
#endif
|
||||
}
|
||||
|
||||
std::string cgroupV2OfProcess()
|
||||
fs::path cgroupV2PathOfProcess()
|
||||
{
|
||||
#if defined(OS_LINUX)
|
||||
chassert(cgroupsV2Enabled());
|
||||
@ -54,17 +56,18 @@ std::string cgroupV2OfProcess()
|
||||
/// A simpler way to get the membership is:
|
||||
std::ifstream cgroup_name_file("/proc/self/cgroup");
|
||||
if (!cgroup_name_file.is_open())
|
||||
return "";
|
||||
return {};
|
||||
/// With cgroups v2, there will be a *single* line with prefix "0::/"
|
||||
/// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
|
||||
std::string cgroup;
|
||||
std::getline(cgroup_name_file, cgroup);
|
||||
static const std::string v2_prefix = "0::/";
|
||||
if (!cgroup.starts_with(v2_prefix))
|
||||
return "";
|
||||
return {};
|
||||
cgroup = cgroup.substr(v2_prefix.length());
|
||||
return cgroup;
|
||||
/// Note: The 'root' cgroup can have an empty cgroup name, this is valid
|
||||
return default_cgroups_mount / cgroup;
|
||||
#else
|
||||
return "";
|
||||
return {};
|
||||
#endif
|
||||
}
|
||||
|
@ -1,7 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <filesystem>
|
||||
#include <string>
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
|
||||
@ -16,7 +15,7 @@ bool cgroupsV2Enabled();
|
||||
/// Assumes that cgroupsV2Enabled() is enabled.
|
||||
bool cgroupsV2MemoryControllerEnabled();
|
||||
|
||||
/// Which cgroup does the process belong to?
|
||||
/// Returns an empty string if the cgroup cannot be determined.
|
||||
/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
|
||||
/// Returns an empty path if the cgroup cannot be determined.
|
||||
/// Assumes that cgroupsV2Enabled() is enabled.
|
||||
std::string cgroupV2OfProcess();
|
||||
std::filesystem::path cgroupV2PathOfProcess();
|
||||
|
@ -87,10 +87,13 @@
|
||||
# define ASAN_POISON_MEMORY_REGION(a, b)
|
||||
#endif
|
||||
|
||||
#if !defined(ABORT_ON_LOGICAL_ERROR)
|
||||
#if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || defined(UNDEFINED_BEHAVIOR_SANITIZER)
|
||||
#define ABORT_ON_LOGICAL_ERROR
|
||||
#endif
|
||||
/// We used to have only ABORT_ON_LOGICAL_ERROR macro, but most of its uses were actually in places where we didn't care about logical errors
|
||||
/// but wanted to check exactly if the current build type is debug or with sanitizer. This new macro is introduced to fix those places.
|
||||
#if !defined(DEBUG_OR_SANITIZER_BUILD)
|
||||
# if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) \
|
||||
|| defined(UNDEFINED_BEHAVIOR_SANITIZER)
|
||||
# define DEBUG_OR_SANITIZER_BUILD
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/// chassert(x) is similar to assert(x), but:
|
||||
@ -101,7 +104,7 @@
|
||||
/// Also it makes sense to call abort() instead of __builtin_unreachable() in debug builds,
|
||||
/// because SIGABRT is easier to debug than SIGTRAP (the second one makes gdb crazy)
|
||||
#if !defined(chassert)
|
||||
#if defined(ABORT_ON_LOGICAL_ERROR)
|
||||
# if defined(DEBUG_OR_SANITIZER_BUILD)
|
||||
// clang-format off
|
||||
#include <base/types.h>
|
||||
namespace DB
|
||||
|
@ -1,5 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdlib>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
|
@ -108,6 +108,14 @@ struct make_unsigned // NOLINT(readability-identifier-naming)
|
||||
using type = std::make_unsigned_t<T>;
|
||||
};
|
||||
|
||||
template <> struct make_unsigned<Int8> { using type = UInt8; };
|
||||
template <> struct make_unsigned<UInt8> { using type = UInt8; };
|
||||
template <> struct make_unsigned<Int16> { using type = UInt16; };
|
||||
template <> struct make_unsigned<UInt16> { using type = UInt16; };
|
||||
template <> struct make_unsigned<Int32> { using type = UInt32; };
|
||||
template <> struct make_unsigned<UInt32> { using type = UInt32; };
|
||||
template <> struct make_unsigned<Int64> { using type = UInt64; };
|
||||
template <> struct make_unsigned<UInt64> { using type = UInt64; };
|
||||
template <> struct make_unsigned<Int128> { using type = UInt128; };
|
||||
template <> struct make_unsigned<UInt128> { using type = UInt128; };
|
||||
template <> struct make_unsigned<Int256> { using type = UInt256; };
|
||||
@ -121,6 +129,14 @@ struct make_signed // NOLINT(readability-identifier-naming)
|
||||
using type = std::make_signed_t<T>;
|
||||
};
|
||||
|
||||
template <> struct make_signed<Int8> { using type = Int8; };
|
||||
template <> struct make_signed<UInt8> { using type = Int8; };
|
||||
template <> struct make_signed<Int16> { using type = Int16; };
|
||||
template <> struct make_signed<UInt16> { using type = Int16; };
|
||||
template <> struct make_signed<Int32> { using type = Int32; };
|
||||
template <> struct make_signed<UInt32> { using type = Int32; };
|
||||
template <> struct make_signed<Int64> { using type = Int64; };
|
||||
template <> struct make_signed<UInt64> { using type = Int64; };
|
||||
template <> struct make_signed<Int128> { using type = Int128; };
|
||||
template <> struct make_signed<UInt128> { using type = Int128; };
|
||||
template <> struct make_signed<Int256> { using type = Int256; };
|
||||
|
@ -2,15 +2,14 @@
|
||||
|
||||
#include <base/cgroupsv2.h>
|
||||
#include <base/getPageSize.h>
|
||||
#include <base/Numa.h>
|
||||
|
||||
#include <fstream>
|
||||
#include <stdexcept>
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/param.h>
|
||||
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
@ -23,8 +22,9 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
|
||||
if (!cgroupsV2MemoryControllerEnabled())
|
||||
return {};
|
||||
|
||||
std::string cgroup = cgroupV2OfProcess();
|
||||
auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
|
||||
std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
|
||||
if (current_cgroup.empty())
|
||||
return {};
|
||||
|
||||
/// Open the bottom-most nested memory limit setting file. If there is no such file at the current
|
||||
/// level, try again at the parent level as memory settings are inherited.
|
||||
@ -62,6 +62,9 @@ uint64_t getMemoryAmountOrZero()
|
||||
|
||||
uint64_t memory_amount = num_pages * page_size;
|
||||
|
||||
if (auto total_numa_memory = DB::getNumaNodesTotalMemory(); total_numa_memory.has_value())
|
||||
memory_amount = *total_numa_memory;
|
||||
|
||||
/// Respect the memory limit set by cgroups v2.
|
||||
auto limit_v2 = getCgroupsV2MemoryLimit();
|
||||
if (limit_v2.has_value() && *limit_v2 < memory_amount)
|
||||
|
9
base/base/isSharedPtrUnique.h
Normal file
@ -0,0 +1,9 @@
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
|
||||
template <typename T>
|
||||
bool isSharedPtrUnique(const std::shared_ptr<T> & ptr)
|
||||
{
|
||||
return ptr.use_count() == 1;
|
||||
}
|
@ -18,6 +18,16 @@ if (GLIBC_COMPATIBILITY)
|
||||
message (FATAL_ERROR "glibc_compatibility can only be used on x86_64 or aarch64.")
|
||||
endif ()
|
||||
|
||||
if (SANITIZE STREQUAL thread)
|
||||
# Disable TSAN instrumentation that conflicts with re-exec due to high ASLR entropy using getauxval
|
||||
# See longer comment in __auxv_init_procfs
|
||||
# In the case of tsan we need to make sure getauxval is not instrumented as that would introduce tsan
|
||||
# internal calls to functions that depend on a state that isn't initialized yet
|
||||
set_source_files_properties(
|
||||
musl/getauxval.c
|
||||
PROPERTIES COMPILE_FLAGS "-mllvm -tsan-instrument-func-entry-exit=false")
|
||||
endif()
|
||||
|
||||
# Need to omit frame pointers to match the performance of glibc
|
||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")
|
||||
|
||||
|
@ -75,6 +75,44 @@ unsigned long NO_SANITIZE_THREAD __getauxval_procfs(unsigned long type)
|
||||
}
|
||||
static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type)
|
||||
{
|
||||
#if defined(__x86_64__) && defined(__has_feature)
|
||||
# if __has_feature(memory_sanitizer) || __has_feature(thread_sanitizer)
|
||||
/// Sanitizers are not compatible with high ASLR entropy, which is the default on modern Linux distributions, and
|
||||
/// to workaround this limitation, TSAN and MSAN (couldn't see other sanitizers doing the same), re-exec the binary
|
||||
/// without ASLR (see https://github.com/llvm/llvm-project/commit/0784b1eefa36d4acbb0dacd2d18796e26313b6c5)
|
||||
|
||||
/// The problem we face is that, in order to re-exec, the sanitizer wants to use the original pathname in the call
|
||||
/// and to get its value it uses getauxval (https://github.com/llvm/llvm-project/blob/20eff684203287828d6722fc860b9d3621429542/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp#L985-L988).
|
||||
/// Since we provide getauxval ourselves (to minimize the version dependency on runtime glibc), we are the ones
|
||||
// being called and we fail horribly:
|
||||
///
|
||||
/// ==301455==ERROR: MemorySanitizer: SEGV on unknown address 0x2ffc6d721550 (pc 0x5622c1cc0073 bp 0x000000000003 sp 0x7ffc6d721530 T301455)
|
||||
/// ==301455==The signal is caused by a WRITE memory access.
|
||||
/// #0 0x5622c1cc0073 in __auxv_init_procfs ./ClickHouse/base/glibc-compatibility/musl/getauxval.c:129:5
|
||||
/// #1 0x5622c1cbffe9 in getauxval ./ClickHouse/base/glibc-compatibility/musl/getauxval.c:240:12
|
||||
/// #2 0x5622c0d7bfb4 in __sanitizer::ReExec() crtstuff.c
|
||||
/// #3 0x5622c0df7bfc in __msan::InitShadowWithReExec(bool) crtstuff.c
|
||||
/// #4 0x5622c0d95356 in __msan_init (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x256356) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741)
|
||||
/// #5 0x5622c0dfe878 in msan.module_ctor main.cc
|
||||
/// #6 0x5622c1cc156c in __libc_csu_init (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x118256c) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741)
|
||||
/// #7 0x73dc05dd7ea3 in __libc_start_main /usr/src/debug/glibc/glibc/csu/../csu/libc-start.c:343:6
|
||||
/// #8 0x5622c0d6b7cd in _start (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x22c7cd) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741)
|
||||
|
||||
/// The source of the issue above is that, at this point in time during __msan_init, we can't really do much as
|
||||
/// most global variables aren't initialized or available yet, so we can't initiate the auxiliary vector.
|
||||
/// Normal glibc / musl getauxval doesn't have this problem since they initiate their auxval vector at the very
|
||||
/// start of __libc_start_main (just keeping track of argv+argc+1), but we don't have such option (otherwise
|
||||
/// this complexity of reading "/proc/self/auxv" or using __environ would not be necessary).
|
||||
|
||||
/// To avoid this crashes on the re-exec call (see above how it would fail when creating `aux`, and if we used
|
||||
/// __auxv_init_environ then it would SIGSEV on READing `__environ`) we capture this call for `AT_EXECFN` and
|
||||
/// unconditionally return "/proc/self/exe" without any preparation. Theoretically this should be fine in
|
||||
/// our case, as we don't load any libraries. That's the theory at least.
|
||||
if (type == AT_EXECFN)
|
||||
return (unsigned long)"/proc/self/exe";
|
||||
# endif
|
||||
#endif
|
||||
|
||||
// For debugging:
|
||||
// - od -t dL /proc/self/auxv
|
||||
// - LD_SHOW_AUX= ls
|
||||
@ -199,7 +237,7 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_environ(unsigned long type)
|
||||
// - __auxv_init_procfs -> __auxv_init_environ -> __getauxval_environ
|
||||
static void * volatile getauxval_func = (void *)__auxv_init_procfs;
|
||||
|
||||
unsigned long getauxval(unsigned long type)
|
||||
unsigned long NO_SANITIZE_THREAD getauxval(unsigned long type)
|
||||
{
|
||||
return ((unsigned long (*)(unsigned long))getauxval_func)(type);
|
||||
}
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "Poco/Exception.h"
|
||||
#include "Poco/Foundation.h"
|
||||
#include "Poco/Mutex.h"
|
||||
#include "Poco/Message.h"
|
||||
|
||||
|
||||
namespace Poco
|
||||
@ -78,6 +79,10 @@ public:
|
||||
///
|
||||
/// The default implementation just breaks into the debugger.
|
||||
|
||||
virtual void logMessageImpl(Message::Priority priority, const std::string & msg) {}
|
||||
/// Write a messages to the log
|
||||
/// Useful for logging from Poco
|
||||
|
||||
static void handle(const Exception & exc);
|
||||
/// Invokes the currently registered ErrorHandler.
|
||||
|
||||
@ -87,6 +92,9 @@ public:
|
||||
static void handle();
|
||||
/// Invokes the currently registered ErrorHandler.
|
||||
|
||||
static void logMessage(Message::Priority priority, const std::string & msg);
|
||||
/// Invokes the currently registered ErrorHandler to log a message.
|
||||
|
||||
static ErrorHandler * set(ErrorHandler * pHandler);
|
||||
/// Registers the given handler as the current error handler.
|
||||
///
|
||||
|
@ -232,7 +232,7 @@ void Foundation_API format(
|
||||
const Any & value10);
|
||||
|
||||
|
||||
void Foundation_API format(std::string & result, const std::string & fmt, const std::vector<Any> & values);
|
||||
void Foundation_API formatVector(std::string & result, const std::string & fmt, const std::vector<Any> & values);
|
||||
/// Supports a variable number of arguments and is used by
|
||||
/// all other variants of format().
|
||||
|
||||
|
@ -21,6 +21,8 @@
|
||||
#include <atomic>
|
||||
#include <cstddef>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
#include "Poco/Channel.h"
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include "Poco/Foundation.h"
|
||||
#include "Poco/Timestamp.h"
|
||||
|
||||
|
@ -21,6 +21,8 @@
|
||||
#include "Poco/AtomicCounter.h"
|
||||
#include "Poco/Foundation.h"
|
||||
|
||||
#include <atomic>
|
||||
|
||||
|
||||
namespace Poco
|
||||
{
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
|
||||
#include "Poco/Foundation.h"
|
||||
#include <Poco/Types.h>
|
||||
|
||||
|
||||
namespace Poco
|
||||
@ -135,6 +136,12 @@ public:
|
||||
static const UUID & x500();
|
||||
/// Returns the namespace identifier for the X500 namespace.
|
||||
|
||||
UInt32 getTimeLow() const { return _timeLow; }
|
||||
UInt16 getTimeMid() const { return _timeMid; }
|
||||
UInt16 getTimeHiAndVersion() const { return _timeHiAndVersion; }
|
||||
UInt16 getClockSeq() const { return _clockSeq; }
|
||||
std::array<UInt8, 6> getNode() const { return std::array<UInt8, 6>{_node[0], _node[1], _node[2], _node[3], _node[4], _node[5]}; }
|
||||
|
||||
protected:
|
||||
UUID(UInt32 timeLow, UInt32 timeMid, UInt32 timeHiAndVersion, UInt16 clockSeq, UInt8 node[]);
|
||||
UUID(const char * bytes, Version version);
|
||||
|
@ -8,7 +8,7 @@
|
||||
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
|
||||
// and Contributors.
|
||||
//
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
//
|
||||
|
||||
|
||||
@ -35,79 +35,91 @@ ErrorHandler::~ErrorHandler()
|
||||
|
||||
void ErrorHandler::exception(const Exception& exc)
|
||||
{
|
||||
poco_debugger_msg(exc.what());
|
||||
poco_debugger_msg(exc.what());
|
||||
}
|
||||
|
||||
|
||||
|
||||
void ErrorHandler::exception(const std::exception& exc)
|
||||
{
|
||||
poco_debugger_msg(exc.what());
|
||||
poco_debugger_msg(exc.what());
|
||||
}
|
||||
|
||||
|
||||
void ErrorHandler::exception()
|
||||
{
|
||||
poco_debugger_msg("unknown exception");
|
||||
poco_debugger_msg("unknown exception");
|
||||
}
|
||||
|
||||
|
||||
void ErrorHandler::handle(const Exception& exc)
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception(exc);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception(exc);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
void ErrorHandler::handle(const std::exception& exc)
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception(exc);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception(exc);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void ErrorHandler::handle()
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
void ErrorHandler::logMessage(Message::Priority priority, const std::string & msg)
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->logMessageImpl(priority, msg);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
ErrorHandler* ErrorHandler::set(ErrorHandler* pHandler)
|
||||
{
|
||||
poco_check_ptr(pHandler);
|
||||
poco_check_ptr(pHandler);
|
||||
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
ErrorHandler* pOld = _pHandler;
|
||||
_pHandler = pHandler;
|
||||
return pOld;
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
ErrorHandler* pOld = _pHandler;
|
||||
_pHandler = pHandler;
|
||||
return pOld;
|
||||
}
|
||||
|
||||
|
||||
ErrorHandler* ErrorHandler::defaultHandler()
|
||||
{
|
||||
// NOTE: Since this is called to initialize the static _pHandler
|
||||
// variable, sh has to be a local static, otherwise we run
|
||||
// into static initialization order issues.
|
||||
static SingletonHolder<ErrorHandler> sh;
|
||||
return sh.get();
|
||||
// NOTE: Since this is called to initialize the static _pHandler
|
||||
// variable, sh has to be a local static, otherwise we run
|
||||
// into static initialization order issues.
|
||||
static SingletonHolder<ErrorHandler> sh;
|
||||
return sh.get();
|
||||
}
|
||||
|
||||
|
||||
|
@ -51,8 +51,8 @@ namespace
|
||||
}
|
||||
if (width != 0) str.width(width);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
void parsePrec(std::ostream& str, std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
|
||||
{
|
||||
if (itFmt != endFmt && *itFmt == '.')
|
||||
@ -67,7 +67,7 @@ namespace
|
||||
if (prec >= 0) str.precision(prec);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
char parseMod(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
|
||||
{
|
||||
char mod = 0;
|
||||
@ -77,13 +77,13 @@ namespace
|
||||
{
|
||||
case 'l':
|
||||
case 'h':
|
||||
case 'L':
|
||||
case 'L':
|
||||
case '?': mod = *itFmt++; break;
|
||||
}
|
||||
}
|
||||
return mod;
|
||||
}
|
||||
|
||||
|
||||
std::size_t parseIndex(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
|
||||
{
|
||||
int index = 0;
|
||||
@ -110,8 +110,8 @@ namespace
|
||||
case 'f': str << std::fixed; break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
void writeAnyInt(std::ostream& str, const Any& any)
|
||||
{
|
||||
if (any.type() == typeid(char))
|
||||
@ -201,7 +201,7 @@ namespace
|
||||
str << RefAnyCast<std::string>(*itVal++);
|
||||
break;
|
||||
case 'z':
|
||||
str << AnyCast<std::size_t>(*itVal++);
|
||||
str << AnyCast<std::size_t>(*itVal++);
|
||||
break;
|
||||
case 'I':
|
||||
case 'D':
|
||||
@ -303,7 +303,7 @@ void format(std::string& result, const std::string& fmt, const Any& value)
|
||||
{
|
||||
std::vector<Any> args;
|
||||
args.push_back(value);
|
||||
format(result, fmt, args);
|
||||
formatVector(result, fmt, args);
|
||||
}
|
||||
|
||||
|
||||
@ -312,7 +312,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
|
||||
std::vector<Any> args;
|
||||
args.push_back(value1);
|
||||
args.push_back(value2);
|
||||
format(result, fmt, args);
|
||||
formatVector(result, fmt, args);
|
||||
}
|
||||
|
||||
|
||||
@ -322,7 +322,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
|
||||
args.push_back(value1);
|
||||
args.push_back(value2);
|
||||
args.push_back(value3);
|
||||
format(result, fmt, args);
|
||||
formatVector(result, fmt, args);
|
||||
}
|
||||
|
||||
|
||||
@ -333,7 +333,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
|
||||
args.push_back(value2);
|
||||
args.push_back(value3);
|
||||
args.push_back(value4);
|
||||
format(result, fmt, args);
|
||||
formatVector(result, fmt, args);
|
||||
}
|
||||
|
||||
|
||||
@ -345,7 +345,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
|
||||
args.push_back(value3);
|
||||
args.push_back(value4);
|
||||
args.push_back(value5);
|
||||
format(result, fmt, args);
|
||||
formatVector(result, fmt, args);
|
||||
}
|
||||
|
||||
|
||||
@ -358,7 +358,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
|
||||
args.push_back(value4);
|
||||
args.push_back(value5);
|
||||
args.push_back(value6);
|
||||
format(result, fmt, args);
|
||||
formatVector(result, fmt, args);
|
||||
}
|
||||
|
||||
|
||||
@ -372,7 +372,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
|
||||
args.push_back(value5);
|
||||
args.push_back(value6);
|
||||
args.push_back(value7);
|
||||
format(result, fmt, args);
|
||||
formatVector(result, fmt, args);
|
||||
}
|
||||
|
||||
|
||||
@ -387,7 +387,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
|
||||
args.push_back(value6);
|
||||
args.push_back(value7);
|
||||
args.push_back(value8);
|
||||
format(result, fmt, args);
|
||||
formatVector(result, fmt, args);
|
||||
}
|
||||
|
||||
|
||||
@ -403,7 +403,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
|
||||
args.push_back(value7);
|
||||
args.push_back(value8);
|
||||
args.push_back(value9);
|
||||
format(result, fmt, args);
|
||||
formatVector(result, fmt, args);
|
||||
}
|
||||
|
||||
|
||||
@ -420,16 +420,16 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
|
||||
args.push_back(value8);
|
||||
args.push_back(value9);
|
||||
args.push_back(value10);
|
||||
format(result, fmt, args);
|
||||
formatVector(result, fmt, args);
|
||||
}
|
||||
|
||||
|
||||
void format(std::string& result, const std::string& fmt, const std::vector<Any>& values)
|
||||
void formatVector(std::string& result, const std::string& fmt, const std::vector<Any>& values)
|
||||
{
|
||||
std::string::const_iterator itFmt = fmt.begin();
|
||||
std::string::const_iterator endFmt = fmt.end();
|
||||
std::vector<Any>::const_iterator itVal = values.begin();
|
||||
std::vector<Any>::const_iterator endVal = values.end();
|
||||
std::vector<Any>::const_iterator endVal = values.end();
|
||||
while (itFmt != endFmt)
|
||||
{
|
||||
switch (*itFmt)
|
||||
|
@ -76,13 +76,13 @@ std::string Binary::toString(int indent) const
|
||||
|
||||
UUID Binary::uuid() const
|
||||
{
|
||||
if (_subtype == 0x04 && _buffer.size() == 16)
|
||||
if ((_subtype == 0x04 || _subtype == 0x03) && _buffer.size() == 16)
|
||||
{
|
||||
UUID uuid;
|
||||
uuid.copyFrom((const char*) _buffer.begin());
|
||||
return uuid;
|
||||
}
|
||||
throw BadCastException("Invalid subtype");
|
||||
throw BadCastException("Invalid subtype: " + std::to_string(_subtype) + ", size: " + std::to_string(_buffer.size()));
|
||||
}
|
||||
|
||||
|
||||
|
@ -57,7 +57,7 @@ std::string ObjectId::toString(const std::string& fmt) const
|
||||
|
||||
for (int i = 0; i < 12; ++i)
|
||||
{
|
||||
s += format(fmt, (unsigned int) _id[i]);
|
||||
s += Poco::format(fmt, (unsigned int) _id[i]);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
@ -43,9 +43,9 @@ namespace Poco {
|
||||
namespace MongoDB {
|
||||
|
||||
|
||||
static const std::string keyCursor {"cursor"};
|
||||
static const std::string keyFirstBatch {"firstBatch"};
|
||||
static const std::string keyNextBatch {"nextBatch"};
|
||||
[[ maybe_unused ]] static const std::string keyCursor {"cursor"};
|
||||
[[ maybe_unused ]] static const std::string keyFirstBatch {"firstBatch"};
|
||||
[[ maybe_unused ]] static const std::string keyNextBatch {"nextBatch"};
|
||||
|
||||
static Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc);
|
||||
|
||||
@ -131,7 +131,7 @@ OpMsgMessage& OpMsgCursor::next(Connection& connection)
|
||||
connection.readResponse(_response);
|
||||
}
|
||||
else
|
||||
#endif
|
||||
#endif
|
||||
{
|
||||
_response.clear();
|
||||
_query.setCursor(_cursorID, _batchSize);
|
||||
|
@ -79,7 +79,7 @@ namespace Net
|
||||
/// Returns the value of the first name-value pair with the given name.
|
||||
/// If no value with the given name has been found, the defaultValue is returned.
|
||||
|
||||
const std::vector<std::reference_wrapper<const std::string>> getAll(const std::string & name) const;
|
||||
std::vector<std::string> getAll(const std::string & name) const;
|
||||
/// Returns all values of all name-value pairs with the given name.
|
||||
///
|
||||
/// Returns an empty vector if there are no name-value pairs with the given name.
|
||||
|
@ -17,9 +17,9 @@
|
||||
#include "Poco/NumberFormatter.h"
|
||||
#include "Poco/NumberParser.h"
|
||||
#include "Poco/String.h"
|
||||
#include <charconv>
|
||||
#include <format>
|
||||
|
||||
|
||||
using Poco::NumberFormatter;
|
||||
using Poco::NumberParser;
|
||||
using Poco::icompare;
|
||||
@ -75,7 +75,7 @@ void HTTPMessage::setContentLength(std::streamsize length)
		erase(CONTENT_LENGTH);
}


std::streamsize HTTPMessage::getContentLength() const
{
	const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);
@ -98,7 +98,7 @@ void HTTPMessage::setContentLength64(Poco::Int64 length)
		erase(CONTENT_LENGTH);
}


Poco::Int64 HTTPMessage::getContentLength64() const
{
	const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);
@ -133,13 +133,13 @@ void HTTPMessage::setChunkedTransferEncoding(bool flag)
		setTransferEncoding(IDENTITY_TRANSFER_ENCODING);
}


bool HTTPMessage::getChunkedTransferEncoding() const
{
	return icompare(getTransferEncoding(), CHUNKED_TRANSFER_ENCODING) == 0;
}


void HTTPMessage::setContentType(const std::string& mediaType)
{
	if (mediaType.empty())
@ -154,7 +154,7 @@ void HTTPMessage::setContentType(const MediaType& mediaType)
	setContentType(mediaType.toString());
}


const std::string& HTTPMessage::getContentType() const
{
	return get(CONTENT_TYPE, UNKNOWN_CONTENT_TYPE);
@ -102,9 +102,9 @@ const std::string& NameValueCollection::get(const std::string& name, const std::
	return defaultValue;
}

const std::vector<std::reference_wrapper<const std::string>> NameValueCollection::getAll(const std::string& name) const
std::vector<std::string> NameValueCollection::getAll(const std::string& name) const
{
	std::vector<std::reference_wrapper<const std::string>> values;
	std::vector<std::string> values;
	for (ConstIterator it = _map.find(name); it != _map.end(); it++)
		if (it->first == name)
			values.push_back(it->second);
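With this change getAll() returns the matching values by copy rather than as references into the collection's internal map, so the result stays valid independently of the collection. A small usage sketch (the header names are arbitrary examples):

    #include "Poco/Net/NameValueCollection.h"
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        Poco::Net::NameValueCollection headers;
        headers.add("Set-Cookie", "a=1");
        headers.add("Set-Cookie", "b=2");

        // The returned strings are copies, so they remain valid even if
        // the collection is modified or destroyed afterwards.
        std::vector<std::string> cookies = headers.getAll("Set-Cookie");
        for (const std::string & value : cookies)
            std::cout << value << '\n';
        return 0;
    }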
@ -17,6 +17,7 @@
#include "Poco/Net/StreamSocketImpl.h"
#include "Poco/NumberFormatter.h"
#include "Poco/Timestamp.h"
#include "Poco/ErrorHandler.h"
#include <string.h> // FD_SET needs memset on some platforms, so we can't use <cstring>

@ -8,7 +8,7 @@
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
// SPDX-License-Identifier: BSL-1.0
//

@ -44,190 +44,194 @@ TCPServerConnectionFilter::~TCPServerConnectionFilter()
|
||||
|
||||
|
||||
TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::UInt16 portNumber, TCPServerParams::Ptr pParams):
|
||||
_socket(ServerSocket(portNumber)),
|
||||
_thread(threadName(_socket)),
|
||||
_stopped(true)
|
||||
{
|
||||
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
|
||||
if (pParams)
|
||||
{
|
||||
int toAdd = pParams->getMaxThreads() - pool.capacity();
|
||||
if (toAdd > 0) pool.addCapacity(toAdd);
|
||||
}
|
||||
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
|
||||
|
||||
_socket(ServerSocket(portNumber)),
|
||||
_thread(threadName(_socket)),
|
||||
_stopped(true)
|
||||
{
|
||||
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
|
||||
if (pParams)
|
||||
{
|
||||
int toAdd = pParams->getMaxThreads() - pool.capacity();
|
||||
if (toAdd > 0) pool.addCapacity(toAdd);
|
||||
}
|
||||
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
|
||||
|
||||
}
|
||||
|
||||
|
||||
TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, const ServerSocket& socket, TCPServerParams::Ptr pParams):
|
||||
_socket(socket),
|
||||
_thread(threadName(socket)),
|
||||
_stopped(true)
|
||||
_socket(socket),
|
||||
_thread(threadName(socket)),
|
||||
_stopped(true)
|
||||
{
|
||||
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
|
||||
if (pParams)
|
||||
{
|
||||
int toAdd = pParams->getMaxThreads() - pool.capacity();
|
||||
if (toAdd > 0) pool.addCapacity(toAdd);
|
||||
}
|
||||
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
|
||||
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
|
||||
if (pParams)
|
||||
{
|
||||
int toAdd = pParams->getMaxThreads() - pool.capacity();
|
||||
if (toAdd > 0) pool.addCapacity(toAdd);
|
||||
}
|
||||
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
|
||||
}
|
||||
|
||||
|
||||
TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, const ServerSocket& socket, TCPServerParams::Ptr pParams):
|
||||
_socket(socket),
|
||||
_pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)),
|
||||
_thread(threadName(socket)),
|
||||
_stopped(true)
|
||||
_socket(socket),
|
||||
_pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)),
|
||||
_thread(threadName(socket)),
|
||||
_stopped(true)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
TCPServer::~TCPServer()
|
||||
{
|
||||
try
|
||||
{
|
||||
stop();
|
||||
_pDispatcher->release();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
try
|
||||
{
|
||||
stop();
|
||||
_pDispatcher->release();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
const TCPServerParams& TCPServer::params() const
|
||||
{
|
||||
return _pDispatcher->params();
|
||||
return _pDispatcher->params();
|
||||
}
|
||||
|
||||
|
||||
void TCPServer::start()
|
||||
{
|
||||
poco_assert (_stopped);
|
||||
poco_assert (_stopped);
|
||||
|
||||
_stopped = false;
|
||||
_thread.start(*this);
|
||||
_stopped = false;
|
||||
_thread.start(*this);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void TCPServer::stop()
|
||||
{
|
||||
if (!_stopped)
|
||||
{
|
||||
_stopped = true;
|
||||
_thread.join();
|
||||
_pDispatcher->stop();
|
||||
}
|
||||
if (!_stopped)
|
||||
{
|
||||
_stopped = true;
|
||||
_thread.join();
|
||||
_pDispatcher->stop();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void TCPServer::run()
|
||||
{
|
||||
while (!_stopped)
|
||||
{
|
||||
Poco::Timespan timeout(250000);
|
||||
try
|
||||
{
|
||||
if (_socket.poll(timeout, Socket::SELECT_READ))
|
||||
{
|
||||
try
|
||||
{
|
||||
StreamSocket ss = _socket.acceptConnection();
|
||||
|
||||
if (!_pConnectionFilter || _pConnectionFilter->accept(ss))
|
||||
{
|
||||
// enable nodelay per default: OSX really needs that
|
||||
while (!_stopped)
|
||||
{
|
||||
Poco::Timespan timeout(250000);
|
||||
try
|
||||
{
|
||||
if (_socket.poll(timeout, Socket::SELECT_READ))
|
||||
{
|
||||
try
|
||||
{
|
||||
StreamSocket ss = _socket.acceptConnection();
|
||||
|
||||
if (!_pConnectionFilter || _pConnectionFilter->accept(ss))
|
||||
{
|
||||
// enable nodelay per default: OSX really needs that
|
||||
#if defined(POCO_OS_FAMILY_UNIX)
|
||||
if (ss.address().family() != AddressFamily::UNIX_LOCAL)
|
||||
if (ss.address().family() != AddressFamily::UNIX_LOCAL)
|
||||
#endif
|
||||
{
|
||||
ss.setNoDelay(true);
|
||||
}
|
||||
_pDispatcher->enqueue(ss);
|
||||
}
|
||||
}
|
||||
catch (Poco::Exception& exc)
|
||||
{
|
||||
ErrorHandler::handle(exc);
|
||||
}
|
||||
catch (std::exception& exc)
|
||||
{
|
||||
ErrorHandler::handle(exc);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
ErrorHandler::handle();
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Poco::Exception& exc)
|
||||
{
|
||||
ErrorHandler::handle(exc);
|
||||
// possibly a resource issue since poll() failed;
|
||||
// give some time to recover before trying again
|
||||
Poco::Thread::sleep(50);
|
||||
}
|
||||
}
|
||||
{
|
||||
ss.setNoDelay(true);
|
||||
}
|
||||
_pDispatcher->enqueue(ss);
|
||||
}
|
||||
else
|
||||
{
|
||||
ErrorHandler::logMessage(Message::PRIO_WARNING, "Filtered out connection from " + ss.peerAddress().toString());
|
||||
}
|
||||
}
|
||||
catch (Poco::Exception& exc)
|
||||
{
|
||||
ErrorHandler::handle(exc);
|
||||
}
|
||||
catch (std::exception& exc)
|
||||
{
|
||||
ErrorHandler::handle(exc);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
ErrorHandler::handle();
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Poco::Exception& exc)
|
||||
{
|
||||
ErrorHandler::handle(exc);
|
||||
// possibly a resource issue since poll() failed;
|
||||
// give some time to recover before trying again
|
||||
Poco::Thread::sleep(50);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int TCPServer::currentThreads() const
|
||||
{
|
||||
return _pDispatcher->currentThreads();
|
||||
return _pDispatcher->currentThreads();
|
||||
}
|
||||
|
||||
|
||||
int TCPServer::maxThreads() const
|
||||
{
|
||||
return _pDispatcher->maxThreads();
|
||||
return _pDispatcher->maxThreads();
|
||||
}
|
||||
|
||||
|
||||
|
||||
int TCPServer::totalConnections() const
|
||||
{
|
||||
return _pDispatcher->totalConnections();
|
||||
return _pDispatcher->totalConnections();
|
||||
}
|
||||
|
||||
|
||||
int TCPServer::currentConnections() const
|
||||
{
|
||||
return _pDispatcher->currentConnections();
|
||||
return _pDispatcher->currentConnections();
|
||||
}
|
||||
|
||||
|
||||
int TCPServer::maxConcurrentConnections() const
|
||||
{
|
||||
return _pDispatcher->maxConcurrentConnections();
|
||||
return _pDispatcher->maxConcurrentConnections();
|
||||
}
|
||||
|
||||
|
||||
|
||||
int TCPServer::queuedConnections() const
|
||||
{
|
||||
return _pDispatcher->queuedConnections();
|
||||
return _pDispatcher->queuedConnections();
|
||||
}
|
||||
|
||||
|
||||
int TCPServer::refusedConnections() const
|
||||
{
|
||||
return _pDispatcher->refusedConnections();
|
||||
return _pDispatcher->refusedConnections();
|
||||
}
|
||||
|
||||
|
||||
void TCPServer::setConnectionFilter(const TCPServerConnectionFilter::Ptr& pConnectionFilter)
|
||||
{
|
||||
poco_assert (_stopped);
|
||||
poco_assert (_stopped);
|
||||
|
||||
_pConnectionFilter = pConnectionFilter;
|
||||
_pConnectionFilter = pConnectionFilter;
|
||||
}
|
||||
|
||||
|
||||
std::string TCPServer::threadName(const ServerSocket& socket)
|
||||
{
|
||||
std::string name("TCPServer: ");
|
||||
name.append(socket.address().toString());
|
||||
return name;
|
||||
std::string name("TCPServer: ");
|
||||
name.append(socket.address().toString());
|
||||
return name;
|
||||
|
||||
}
|
||||
|
||||
|
@ -8,7 +8,7 @@
|
||||
// Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
|
||||
// and Contributors.
|
||||
//
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
//
|
||||
|
||||
|
||||
@ -33,44 +33,44 @@ namespace Net {
|
||||
class TCPConnectionNotification: public Notification
|
||||
{
|
||||
public:
|
||||
TCPConnectionNotification(const StreamSocket& socket):
|
||||
_socket(socket)
|
||||
{
|
||||
}
|
||||
|
||||
~TCPConnectionNotification()
|
||||
{
|
||||
}
|
||||
|
||||
const StreamSocket& socket() const
|
||||
{
|
||||
return _socket;
|
||||
}
|
||||
TCPConnectionNotification(const StreamSocket& socket):
|
||||
_socket(socket)
|
||||
{
|
||||
}
|
||||
|
||||
~TCPConnectionNotification()
|
||||
{
|
||||
}
|
||||
|
||||
const StreamSocket& socket() const
|
||||
{
|
||||
return _socket;
|
||||
}
|
||||
|
||||
private:
|
||||
StreamSocket _socket;
|
||||
StreamSocket _socket;
|
||||
};
|
||||
|
||||
|
||||
TCPServerDispatcher::TCPServerDispatcher(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, TCPServerParams::Ptr pParams):
|
||||
_rc(1),
|
||||
_pParams(pParams),
|
||||
_currentThreads(0),
|
||||
_totalConnections(0),
|
||||
_currentConnections(0),
|
||||
_maxConcurrentConnections(0),
|
||||
_refusedConnections(0),
|
||||
_stopped(false),
|
||||
_pConnectionFactory(pFactory),
|
||||
_threadPool(threadPool)
|
||||
_rc(1),
|
||||
_pParams(pParams),
|
||||
_currentThreads(0),
|
||||
_totalConnections(0),
|
||||
_currentConnections(0),
|
||||
_maxConcurrentConnections(0),
|
||||
_refusedConnections(0),
|
||||
_stopped(false),
|
||||
_pConnectionFactory(pFactory),
|
||||
_threadPool(threadPool)
|
||||
{
|
||||
poco_check_ptr (pFactory);
|
||||
poco_check_ptr (pFactory);
|
||||
|
||||
if (!_pParams)
|
||||
_pParams = new TCPServerParams;
|
||||
|
||||
if (_pParams->getMaxThreads() == 0)
|
||||
_pParams->setMaxThreads(threadPool.capacity());
|
||||
if (!_pParams)
|
||||
_pParams = new TCPServerParams;
|
||||
|
||||
if (_pParams->getMaxThreads() == 0)
|
||||
_pParams->setMaxThreads(threadPool.capacity());
|
||||
}
|
||||
|
||||
|
||||
@ -81,161 +81,184 @@ TCPServerDispatcher::~TCPServerDispatcher()
|
||||
|
||||
void TCPServerDispatcher::duplicate()
|
||||
{
|
||||
++_rc;
|
||||
++_rc;
|
||||
}
|
||||
|
||||
|
||||
void TCPServerDispatcher::release()
|
||||
{
|
||||
if (--_rc == 0) delete this;
|
||||
if (--_rc == 0) delete this;
|
||||
}
|
||||
|
||||
|
||||
void TCPServerDispatcher::run()
|
||||
{
|
||||
AutoPtr<TCPServerDispatcher> guard(this); // ensure object stays alive
|
||||
AutoPtr<TCPServerDispatcher> guard(this); // ensure object stays alive
|
||||
|
||||
int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds();
|
||||
int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds();
|
||||
|
||||
for (;;)
|
||||
{
|
||||
try
|
||||
{
|
||||
AutoPtr<Notification> pNf = _queue.waitDequeueNotification(idleTime);
|
||||
if (pNf && !_stopped)
|
||||
{
|
||||
TCPConnectionNotification* pCNf = dynamic_cast<TCPConnectionNotification*>(pNf.get());
|
||||
if (pCNf)
|
||||
{
|
||||
beginConnection();
|
||||
if (!_stopped)
|
||||
{
|
||||
std::unique_ptr<TCPServerConnection> pConnection(_pConnectionFactory->createConnection(pCNf->socket()));
|
||||
poco_check_ptr(pConnection.get());
|
||||
pConnection->start();
|
||||
}
|
||||
/// endConnection() should be called after destroying TCPServerConnection,
|
||||
/// otherwise currentConnections() could become zero while some connections are yet still alive.
|
||||
endConnection();
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Poco::Exception &exc) { ErrorHandler::handle(exc); }
|
||||
catch (std::exception &exc) { ErrorHandler::handle(exc); }
|
||||
catch (...) { ErrorHandler::handle(); }
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
if (_stopped || (_currentThreads > 1 && _queue.empty()))
|
||||
{
|
||||
--_currentThreads;
|
||||
break;
|
||||
}
|
||||
}
|
||||
for (;;)
|
||||
{
|
||||
try
|
||||
{
|
||||
AutoPtr<Notification> pNf = _queue.waitDequeueNotification(idleTime);
|
||||
if (pNf && !_stopped)
|
||||
{
|
||||
TCPConnectionNotification* pCNf = dynamic_cast<TCPConnectionNotification*>(pNf.get());
|
||||
if (pCNf)
|
||||
{
|
||||
beginConnection();
|
||||
if (!_stopped)
|
||||
{
|
||||
std::unique_ptr<TCPServerConnection> pConnection(_pConnectionFactory->createConnection(pCNf->socket()));
|
||||
poco_check_ptr(pConnection.get());
|
||||
pConnection->start();
|
||||
}
|
||||
/// endConnection() should be called after destroying TCPServerConnection,
|
||||
/// otherwise currentConnections() could become zero while some connections are yet still alive.
|
||||
endConnection();
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Poco::Exception &exc) { ErrorHandler::handle(exc); }
|
||||
catch (std::exception &exc) { ErrorHandler::handle(exc); }
|
||||
catch (...) { ErrorHandler::handle(); }
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
if (_stopped || (_currentThreads > 1 && _queue.empty()))
|
||||
{
|
||||
--_currentThreads;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
namespace
|
||||
{
|
||||
static const std::string threadName("TCPServerConnection");
|
||||
static const std::string threadName("TCPServerConnection");
|
||||
}
|
||||
|
||||
|
||||
|
||||
void TCPServerDispatcher::enqueue(const StreamSocket& socket)
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
|
||||
if (_queue.size() < _pParams->getMaxQueued())
|
||||
{
|
||||
if (!_queue.hasIdleThreads() && _currentThreads < _pParams->getMaxThreads())
|
||||
{
|
||||
try
|
||||
{
|
||||
ErrorHandler::logMessage(Message::PRIO_TEST, "Queue size: " + std::to_string(_queue.size()) +
|
||||
", current threads: " + std::to_string(_currentThreads) +
|
||||
", threads in pool: " + std::to_string(_threadPool.allocated()) +
|
||||
", current connections: " + std::to_string(_currentConnections));
|
||||
|
||||
|
||||
if (_queue.size() < _pParams->getMaxQueued())
|
||||
{
|
||||
/// NOTE: the condition below is wrong.
|
||||
/// Since the thread pool is shared between multiple servers/TCPServerDispatchers,
|
||||
/// _currentThreads < _pParams->getMaxThreads() will be true when the pool is actually saturated.
|
||||
/// As a result, queue is useless and connections never wait in queue.
|
||||
/// Instead, we (mistakenly) think that we can create a thread for this connection, but we fail to create it
|
||||
/// and the connection get rejected.
|
||||
/// We could check _currentThreads < _threadPool.allocated() to make it work,
|
||||
/// but it's not clear if we want to make it work
|
||||
/// because it may be better to reject connection immediately if we don't have resources to handle it.
|
||||
if (!_queue.hasIdleThreads() && _currentThreads < _pParams->getMaxThreads())
|
||||
{
|
||||
try
|
||||
{
|
||||
this->duplicate();
|
||||
_threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName);
|
||||
++_currentThreads;
|
||||
}
|
||||
catch (Poco::Exception& exc)
|
||||
{
|
||||
_threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName);
|
||||
++_currentThreads;
|
||||
}
|
||||
catch (Poco::Exception& exc)
|
||||
{
|
||||
ErrorHandler::logMessage(Message::PRIO_WARNING, "Got an exception while starting thread for connection from " +
|
||||
socket.peerAddress().toString());
|
||||
ErrorHandler::handle(exc);
|
||||
this->release();
|
||||
++_refusedConnections;
|
||||
std::cerr << "Got exception while starting thread for connection. Error code: "
|
||||
<< exc.code() << ", message: '" << exc.displayText() << "'" << std::endl;
|
||||
return;
|
||||
}
|
||||
}
|
||||
_queue.enqueueNotification(new TCPConnectionNotification(socket));
|
||||
}
|
||||
else
|
||||
{
|
||||
++_refusedConnections;
|
||||
}
|
||||
++_refusedConnections;
|
||||
return;
|
||||
}
|
||||
}
|
||||
else if (!_queue.hasIdleThreads())
|
||||
{
|
||||
ErrorHandler::logMessage(Message::PRIO_TRACE, "Don't have idle threads, adding connection from " +
|
||||
socket.peerAddress().toString() + " to the queue, size: " + std::to_string(_queue.size()));
|
||||
}
|
||||
_queue.enqueueNotification(new TCPConnectionNotification(socket));
|
||||
}
|
||||
else
|
||||
{
|
||||
ErrorHandler::logMessage(Message::PRIO_WARNING, "Refusing connection from " + socket.peerAddress().toString() +
|
||||
", reached max queue size " + std::to_string(_pParams->getMaxQueued()));
|
||||
++_refusedConnections;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void TCPServerDispatcher::stop()
|
||||
{
|
||||
_stopped = true;
|
||||
_queue.clear();
|
||||
_queue.wakeUpAll();
|
||||
_stopped = true;
|
||||
_queue.clear();
|
||||
_queue.wakeUpAll();
|
||||
}
|
||||
|
||||
|
||||
int TCPServerDispatcher::currentThreads() const
|
||||
{
|
||||
return _currentThreads;
|
||||
return _currentThreads;
|
||||
}
|
||||
|
||||
int TCPServerDispatcher::maxThreads() const
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
|
||||
return _threadPool.capacity();
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
|
||||
return _threadPool.capacity();
|
||||
}
|
||||
|
||||
|
||||
int TCPServerDispatcher::totalConnections() const
|
||||
{
|
||||
return _totalConnections;
|
||||
return _totalConnections;
|
||||
}
|
||||
|
||||
|
||||
int TCPServerDispatcher::currentConnections() const
|
||||
{
|
||||
return _currentConnections;
|
||||
return _currentConnections;
|
||||
}
|
||||
|
||||
|
||||
int TCPServerDispatcher::maxConcurrentConnections() const
|
||||
{
|
||||
return _maxConcurrentConnections;
|
||||
return _maxConcurrentConnections;
|
||||
}
|
||||
|
||||
|
||||
int TCPServerDispatcher::queuedConnections() const
|
||||
{
|
||||
return _queue.size();
|
||||
return _queue.size();
|
||||
}
|
||||
|
||||
|
||||
int TCPServerDispatcher::refusedConnections() const
|
||||
{
|
||||
return _refusedConnections;
|
||||
return _refusedConnections;
|
||||
}
|
||||
|
||||
|
||||
void TCPServerDispatcher::beginConnection()
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
|
||||
++_totalConnections;
|
||||
++_currentConnections;
|
||||
if (_currentConnections > _maxConcurrentConnections)
|
||||
_maxConcurrentConnections.store(_currentConnections);
|
||||
++_totalConnections;
|
||||
++_currentConnections;
|
||||
if (_currentConnections > _maxConcurrentConnections)
|
||||
_maxConcurrentConnections.store(_currentConnections);
|
||||
}
|
||||
|
||||
|
||||
void TCPServerDispatcher::endConnection()
|
||||
{
|
||||
--_currentConnections;
|
||||
--_currentConnections;
|
||||
}
|
||||
|
||||
|
||||
|
@ -261,6 +261,11 @@ namespace Util
		///
		/// Throws a NullPointerException if no Application instance exists.

	static Application * instanceRawPtr();
		/// Returns a raw pointer to the Application singleton.
		///
		/// The caller should check whether the result is nullptr.

	const Poco::Timestamp & startTime() const;
		/// Returns the application start time (UTC).

@ -448,6 +453,12 @@ namespace Util
}


inline Application * Application::instanceRawPtr()
{
	return _pInstance;
}


inline const Poco::Timestamp & Application::startTime() const
{
	return _startTime;
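instanceRawPtr() lets callers probe for the Application singleton without the NullPointerException that instance() throws when no Application has been constructed yet. A hedged sketch of the intended usage pattern (the helper function and log message are illustrative):

    #include "Poco/Util/Application.h"
    #include <string>

    void logIfApplicationExists(const std::string & text)
    {
        // instance() would throw NullPointerException here if no Application exists;
        // the raw-pointer accessor lets library code degrade gracefully instead.
        if (Poco::Util::Application * app = Poco::Util::Application::instanceRawPtr())
            app->logger().information(text);
    }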
@ -2,11 +2,11 @@

# NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54488)
SET(VERSION_REVISION 54489)
SET(VERSION_MAJOR 24)
SET(VERSION_MINOR 7)
SET(VERSION_MINOR 8)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH aa023477a9265e403982fca5ee29a714db5133d9)
SET(VERSION_DESCRIBE v24.7.1.1-testing)
SET(VERSION_STRING 24.7.1.1)
SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af)
SET(VERSION_DESCRIBE v24.8.1.1-testing)
SET(VERSION_STRING 24.8.1.1)
# end of autochange
@ -42,9 +42,19 @@ endif ()
# But use 2 parallel jobs, since:
# - this is what llvm does
# - and I've verfied that lld-11 does not use all available CPU time (in peak) while linking one binary
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2)
    message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
    set (PARALLEL_LINK_JOBS 2)
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO)
    if (ARCH_AARCH64)
        # aarch64 builds start to often fail with OOMs (reason not yet clear), for now let's limit the concurrency
        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.")
        set (PARALLEL_LINK_JOBS 1)
        if (LINKER_NAME MATCHES "lld")
            math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4)
            set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}")
        endif()
    elseif (PARALLEL_LINK_JOBS GREATER 2)
        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
        set (PARALLEL_LINK_JOBS 2)
    endif ()
endif()

message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).")
@ -57,7 +57,8 @@ option(WITH_COVERAGE "Instrumentation for code coverage with default implementat

if (WITH_COVERAGE)
    message (STATUS "Enabled instrumentation for code coverage")
    set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping")
    set(COVERAGE_FLAGS "SHELL:-fprofile-instr-generate -fcoverage-mapping")
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
endif()

option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)
@ -84,5 +84,5 @@ if (CMAKE_CROSSCOMPILING)
        message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!")
    endif ()

    message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILE_TARGET}")
    message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILER_TARGET}")
endif ()
8 contrib/CMakeLists.txt vendored
@ -71,7 +71,6 @@ add_contrib (zlib-ng-cmake zlib-ng)
add_contrib (bzip2-cmake bzip2)
add_contrib (minizip-ng-cmake minizip-ng)
add_contrib (snappy-cmake snappy)
add_contrib (rocksdb-cmake rocksdb)
add_contrib (thrift-cmake thrift)
# parquet/arrow/orc
add_contrib (arrow-cmake arrow) # requires: snappy, thrift, double-conversion
@ -148,6 +147,7 @@ add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arro
add_contrib (cppkafka-cmake cppkafka)
add_contrib (libpqxx-cmake libpqxx)
add_contrib (libpq-cmake libpq)
add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing
add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float)
add_contrib (idna-cmake idna)
@ -179,7 +179,7 @@ else()
    message(STATUS "Not using QPL")
endif ()

if (OS_LINUX AND ARCH_AMD64)
if (OS_LINUX AND ARCH_AMD64 AND NOT NO_SSE3_OR_HIGHER)
    option (ENABLE_QATLIB "Enable Intel® QuickAssist Technology Library (QATlib)" ${ENABLE_LIBRARIES})
elseif(ENABLE_QATLIB)
    message (${RECONFIGURE_MESSAGE_LEVEL} "QATLib is only supported on x86_64")
@ -228,6 +228,10 @@ add_contrib (ulid-c-cmake ulid-c)

add_contrib (libssh-cmake libssh)

add_contrib (prometheus-protobufs-cmake prometheus-protobufs prometheus-protobufs-gogo)

add_contrib(numactl-cmake numactl)

# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
# in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually,
2 contrib/NuRaft vendored
@ -1 +1 @@
Subproject commit cb5dc3c906e80f253e9ce9535807caef827cc2e0
Subproject commit c2b0811f164a7948208489562dab4f186eb305ce
@ -27,7 +27,7 @@ if (ENABLE_QAT_OUT_OF_TREE_BUILD)
        ${QAT_AL_INCLUDE_DIR}
        ${QAT_USDM_INCLUDE_DIR}
        ${ZSTD_LIBRARY_DIR})
    target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC)
    target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0)
    add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
else () # In-tree build
    message(STATUS "Intel QATZSTD in-tree build")
@ -78,7 +78,7 @@ else () # In-tree build
        ${QAT_USDM_INCLUDE_DIR}
        ${ZSTD_LIBRARY_DIR}
        ${LIBQAT_HEADER_DIR})
    target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC -DINTREE)
    target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DINTREE)
    target_include_directories(_qatzstd_plugin SYSTEM PUBLIC $<BUILD_INTERFACE:${QATZSTD_SRC_DIR}> $<INSTALL_INTERFACE:include>)
    add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
endif ()
2 contrib/avro vendored
@ -1 +1 @@
Subproject commit d43acc84d3d455b016f847d6666fbc3cd27f16a9
Subproject commit 545e7002683cbc2198164d93088ac8e4955b4628
@ -125,7 +125,7 @@ configure_file("${AWS_SDK_CORE_DIR}/include/aws/core/SDKConfig.h.in"
    "${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)

aws_get_version(AWS_CRT_CPP_VERSION_MAJOR AWS_CRT_CPP_VERSION_MINOR AWS_CRT_CPP_VERSION_PATCH FULL_VERSION GIT_HASH)
configure_file("${AWS_CRT_DIR}/include/aws/crt/Config.h.in" "${AWS_CRT_DIR}/include/aws/crt/Config.h" @ONLY)
configure_file("${AWS_CRT_DIR}/include/aws/crt/Config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/include/aws/crt/Config.h" @ONLY)

list(APPEND AWS_SOURCES ${AWS_SDK_CORE_SRC} ${AWS_SDK_CORE_NET_SRC} ${AWS_SDK_CORE_PLATFORM_SRC})

2 contrib/azure vendored
@ -1 +1 @@
Subproject commit 6262a76ef4c4c330c84e58dd4f6f13f4e6230fcd
Subproject commit 67272b7ee0adff6b69921b26eb071ba1a353062c
@ -9,6 +9,7 @@ set(DATASKETCHES_LIBRARY theta)
add_library(_datasketches INTERFACE)
target_include_directories(_datasketches SYSTEM BEFORE INTERFACE
    "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/common/include"
    "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/count/include"
    "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/include")

add_library(ch_contrib::datasketches ALIAS _datasketches)

@ -157,15 +157,13 @@ function(protobuf_generate)
|
||||
|
||||
set(_generated_srcs_all)
|
||||
foreach(_proto ${protobuf_generate_PROTOS})
|
||||
get_filename_component(_abs_file ${_proto} ABSOLUTE)
|
||||
get_filename_component(_abs_dir ${_abs_file} DIRECTORY)
|
||||
get_filename_component(_basename ${_proto} NAME_WE)
|
||||
file(RELATIVE_PATH _rel_dir ${CMAKE_CURRENT_SOURCE_DIR} ${_abs_dir})
|
||||
|
||||
set(_possible_rel_dir)
|
||||
if (NOT protobuf_generate_APPEND_PATH)
|
||||
set(_possible_rel_dir ${_rel_dir}/)
|
||||
endif()
|
||||
# The protobuf compiler doesn't return paths to the files it generates so we have to calculate those paths here:
|
||||
# _abs_file - absolute path to a .proto file,
|
||||
# _possible_rel_dir - relative path to the .proto file from some import directory specified in Protobuf_IMPORT_DIRS,
|
||||
# _basename - filename of the .proto file (without path and without extenstion).
|
||||
get_proto_absolute_path(_abs_file "${_proto}" ${_protobuf_include_path})
|
||||
get_proto_relative_path(_possible_rel_dir "${_abs_file}" ${_protobuf_include_path})
|
||||
get_filename_component(_basename "${_abs_file}" NAME_WE)
|
||||
|
||||
set(_generated_srcs)
|
||||
foreach(_ext ${protobuf_generate_GENERATE_EXTENSIONS})
|
||||
@ -173,7 +171,7 @@ function(protobuf_generate)
|
||||
endforeach()
|
||||
|
||||
if(protobuf_generate_DESCRIPTORS AND protobuf_generate_LANGUAGE STREQUAL cpp)
|
||||
set(_descriptor_file "${CMAKE_CURRENT_BINARY_DIR}/${_basename}.desc")
|
||||
set(_descriptor_file "${protobuf_generate_PROTOC_OUT_DIR}/${_possible_rel_dir}${_basename}.desc")
|
||||
set(_dll_desc_out "--descriptor_set_out=${_descriptor_file}")
|
||||
list(APPEND _generated_srcs ${_descriptor_file})
|
||||
endif()
|
||||
@ -196,3 +194,36 @@ function(protobuf_generate)
|
||||
target_sources(${protobuf_generate_TARGET} PRIVATE ${_generated_srcs_all})
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
# Calculates the absolute path to a .proto file.
|
||||
function(get_proto_absolute_path result proto)
|
||||
cmake_path(IS_ABSOLUTE proto _is_abs_path)
|
||||
if(_is_abs_path)
|
||||
set(${result} "${proto}" PARENT_SCOPE)
|
||||
return()
|
||||
endif()
|
||||
foreach(_include_dir ${ARGN})
|
||||
if(EXISTS "${_include_dir}/${proto}")
|
||||
set(${result} "${_include_dir}/${proto}" PARENT_SCOPE)
|
||||
return()
|
||||
endif()
|
||||
endforeach()
|
||||
message(SEND_ERROR "Not found protobuf ${proto} in Protobuf_IMPORT_DIRS: ${ARGN}")
|
||||
endfunction()
|
||||
|
||||
# Calculates a relative path to a .proto file. The returned path is relative to one of include directories.
|
||||
function(get_proto_relative_path result abs_path)
|
||||
set(${result} "" PARENT_SCOPE)
|
||||
get_filename_component(_abs_dir "${abs_path}" DIRECTORY)
|
||||
foreach(_include_dir ${ARGN})
|
||||
cmake_path(IS_PREFIX _include_dir "${_abs_dir}" _is_prefix)
|
||||
if(_is_prefix)
|
||||
file(RELATIVE_PATH _rel_dir "${_include_dir}" "${_abs_dir}")
|
||||
if(NOT _rel_dir STREQUAL "")
|
||||
set(${result} "${_rel_dir}/" PARENT_SCOPE)
|
||||
endif()
|
||||
return()
|
||||
endif()
|
||||
endforeach()
|
||||
message(WARNING "Not found protobuf ${abs_path} in Protobuf_IMPORT_DIRS: ${ARGN}")
|
||||
endfunction()
|
||||
|
2 contrib/grpc vendored
@ -1 +1 @@
Subproject commit 77b2737a709d43d8c6895e3f03ca62b00bd9201c
Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db
2 contrib/icu vendored
@ -1 +1 @@
Subproject commit a56dde820dc35665a66f2e9ee8ba58e75049b668
Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625
@ -5,15 +5,13 @@ else ()
endif ()

if (NOT ENABLE_ICU)
    message(STATUS "Not using icu")
    message(STATUS "Not using ICU")
    return()
endif()

set(ICU_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source")
set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/")

set (CMAKE_CXX_STANDARD 17)

# These lists of sources were generated from build log of the original ICU build system (configure + make).

set(ICUUC_SOURCES
@ -462,9 +460,9 @@ file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
enable_language(ASM)

if (ARCH_S390X)
    set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70b_dat.S" )
    set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75b_dat.S" )
else()
    set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70l_dat.S" )
    set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75l_dat.S" )
endif()

set(ICUDATA_SOURCES
2 contrib/icudata vendored
@ -1 +1 @@
Subproject commit c8e717892a557b4d2852317c7d628aacc0a0e5ab
Subproject commit 4904951339a70b4814d2d3723436b20d079cb01b
@ -175,12 +175,19 @@ endif ()

target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)

# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
# jemalloc provides support two unwind flavors:
# - JEMALLOC_PROF_LIBUNWIND - unw_backtrace() - gnu libunwind (compatible with llvm libunwind)
# - JEMALLOC_PROF_LIBGCC - _Unwind_Backtrace() - the original HP libunwind and the one coming with gcc / g++ / libstdc++.
#
# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
# But for JEMALLOC_PROF_LIBGCC it also calls _Unwind_Backtrace() during
# bootstraping of jemalloc, which may lead to deadlock, if the dlsym will do
# allocations somewhere (like glibc does prio 2.34, see [1]).
#
# [1]: https://sourceware.org/git/?p=glibc.git;a=commit;h=fada9018199c21c469ff0e731ef75c6020074ac9
#
# And since ClickHouse unwind already supports unw_backtrace() we can safely
# switch to it to avoid this deadlock.
target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1)
target_link_libraries (_jemalloc PRIVATE unwind)

# for RTLD_NEXT
@ -4,3 +4,14 @@ It allows to integrate JEMalloc into CMake project.
- Added JEMALLOC_CONFIG_MALLOC_CONF substitution
- Add musl support (USE_MUSL)
- Also note, that darwin build requires JEMALLOC_PREFIX, while others do not
- JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE should be disabled

  CLOCK_MONOTONIC_COARSE can go backwards after clock_adjtime(ADJ_FREQUENCY)
  Let's disable it for now, and this menas that CLOCK_MONOTONIC will be used,
  and this, should not be a problem, since:
  - jemalloc do not call clock_gettime() that frequently
  - the difference is CLOCK_MONOTONIC 20ns and CLOCK_MONOTONIC_COARSE 4ns

  This can be done with the following command:

  gg JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE | cut -d: -f1 | xargs sed -i 's@#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE@/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */@'
@ -96,7 +96,7 @@
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
|
||||
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
|
@ -98,7 +98,7 @@
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
|
||||
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
|
@ -98,7 +98,7 @@
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
|
||||
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
|
@ -98,7 +98,7 @@
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
|
||||
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
|
@ -96,7 +96,7 @@
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
|
||||
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
|
@ -98,7 +98,7 @@
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
|
||||
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
|
@ -99,7 +99,7 @@
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||
*/
|
||||
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
|
||||
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
|
||||
|
||||
/*
|
||||
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||
|
@ -1,20 +1,21 @@
if (NOT ENABLE_FIU)
    message (STATUS "Not using fiu")
if (NOT ENABLE_LIBFIU)
    message (STATUS "Not using libfiu")
    return ()
endif ()

set(FIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/")
set(LIBFIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/")

set(FIU_SOURCES
    ${FIU_DIR}/libfiu/fiu.c
    ${FIU_DIR}/libfiu/fiu-rc.c
    ${FIU_DIR}/libfiu/backtrace.c
    ${FIU_DIR}/libfiu/wtable.c
set(LIBFIU_SOURCES
    ${LIBFIU_DIR}/libfiu/fiu.c
    ${LIBFIU_DIR}/libfiu/fiu-rc.c
    ${LIBFIU_DIR}/libfiu/backtrace.c
    ${LIBFIU_DIR}/libfiu/wtable.c
)

set(FIU_HEADERS "${FIU_DIR}/libfiu")
set(LIBFIU_HEADERS "${LIBFIU_DIR}/libfiu")

add_library(_fiu ${FIU_SOURCES})
target_compile_definitions(_fiu PUBLIC DUMMY_BACKTRACE)
target_include_directories(_fiu PUBLIC ${FIU_HEADERS})
add_library(ch_contrib::fiu ALIAS _fiu)
add_library(_libfiu ${LIBFIU_SOURCES})
target_compile_definitions(_libfiu PUBLIC DUMMY_BACKTRACE)
target_compile_definitions(_libfiu PUBLIC FIU_ENABLE)
target_include_directories(_libfiu PUBLIC ${LIBFIU_HEADERS})
add_library(ch_contrib::libfiu ALIAS _libfiu)
@ -54,7 +54,6 @@ set(SRCS
|
||||
"${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c"
|
||||
"${LIBPQ_SOURCE_DIR}/port/thread.c"
|
||||
"${LIBPQ_SOURCE_DIR}/port/path.c"
|
||||
"${LIBPQ_SOURCE_DIR}/port/explicit_bzero.c"
|
||||
)
|
||||
|
||||
add_library(_libpq ${SRCS})
|
||||
|
2
contrib/libprotobuf-mutator
vendored
2
contrib/libprotobuf-mutator
vendored
@ -1 +1 @@
|
||||
Subproject commit a304ec48dcf15d942607032151f7e9ee504b5dcf
|
||||
Subproject commit 1f95f8083066f5b38fd2db172e7e7f9aa7c49d2d
|
2
contrib/librdkafka
vendored
2
contrib/librdkafka
vendored
@ -1 +1 @@
|
||||
Subproject commit 2d2aab6f5b79db1cfca15d7bf0dee75d00d82082
|
||||
Subproject commit 39d4ed49ccf3406e2bf825d5d7b0903b5a290782
|
2
contrib/libunwind
vendored
2
contrib/libunwind
vendored
@ -1 +1 @@
|
||||
Subproject commit d6a01c46327e56fd86beb8aaa31591fcd9a6b7df
|
||||
Subproject commit a89d904befea07814628c6ce0b44083c4e149c62
|
@ -4,9 +4,6 @@ set(LIBUNWIND_CXX_SOURCES
|
||||
"${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp"
|
||||
"${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp"
|
||||
"${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp")
|
||||
if (APPLE)
|
||||
set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} "${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp")
|
||||
endif ()
|
||||
|
||||
set(LIBUNWIND_C_SOURCES
|
||||
"${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c"
|
||||
@ -32,6 +29,7 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")
|
||||
|
||||
target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
|
||||
target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1)
|
||||
target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_REMEMBER_STACK_ALLOC=1)
|
||||
# NOTE: from this macros sizeof(unw_context_t)/sizeof(unw_cursor_t) is depends, so it should be set always
|
||||
target_compile_definitions(unwind PUBLIC -D_LIBUNWIND_IS_NATIVE_ONLY)
|
||||
|
||||
|
1
contrib/numactl
vendored
Submodule
1
contrib/numactl
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit 8d13d63a05f0c3cd88bf777cbb61541202b7da08
|
30
contrib/numactl-cmake/CMakeLists.txt
Normal file
30
contrib/numactl-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,30 @@
|
||||
if (NOT (
|
||||
OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_LOONGARCH64))
|
||||
)
|
||||
if (ENABLE_NUMACTL)
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL}
|
||||
"numactl is disabled implicitly because the OS or architecture is not supported. Use -DENABLE_NUMACTL=0")
|
||||
endif ()
|
||||
set (ENABLE_NUMACTL OFF)
|
||||
else()
|
||||
option (ENABLE_NUMACTL "Enable numactl" ${ENABLE_LIBRARIES})
|
||||
endif()
|
||||
|
||||
if (NOT ENABLE_NUMACTL)
|
||||
message (STATUS "Not using numactl")
|
||||
return()
|
||||
endif ()
|
||||
|
||||
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/numactl")
|
||||
|
||||
set (SRCS
|
||||
"${LIBRARY_DIR}/libnuma.c"
|
||||
"${LIBRARY_DIR}/syscall.c"
|
||||
)
|
||||
|
||||
add_library(_numactl ${SRCS})
|
||||
|
||||
target_include_directories(_numactl SYSTEM PRIVATE include)
|
||||
target_include_directories(_numactl SYSTEM PUBLIC "${LIBRARY_DIR}")
|
||||
|
||||
add_library(ch_contrib::numactl ALIAS _numactl)
|
82
contrib/numactl-cmake/include/config.h
Normal file
82
contrib/numactl-cmake/include/config.h
Normal file
@ -0,0 +1,82 @@
|
||||
/* config.h. Generated from config.h.in by configure. */
|
||||
/* config.h.in. Generated from configure.ac by autoheader. */
|
||||
|
||||
/* Checking for symver attribute */
|
||||
#define HAVE_ATTRIBUTE_SYMVER 0
|
||||
|
||||
/* Define to 1 if you have the <dlfcn.h> header file. */
|
||||
#define HAVE_DLFCN_H 1
|
||||
|
||||
/* Define to 1 if you have the <inttypes.h> header file. */
|
||||
#define HAVE_INTTYPES_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdint.h> header file. */
|
||||
#define HAVE_STDINT_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdio.h> header file. */
|
||||
#define HAVE_STDIO_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdlib.h> header file. */
|
||||
#define HAVE_STDLIB_H 1
|
||||
|
||||
/* Define to 1 if you have the <strings.h> header file. */
|
||||
#define HAVE_STRINGS_H 1
|
||||
|
||||
/* Define to 1 if you have the <string.h> header file. */
|
||||
#define HAVE_STRING_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/stat.h> header file. */
|
||||
#define HAVE_SYS_STAT_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/types.h> header file. */
|
||||
#define HAVE_SYS_TYPES_H 1
|
||||
|
||||
/* Define to 1 if you have the <unistd.h> header file. */
|
||||
#define HAVE_UNISTD_H 1
|
||||
|
||||
/* Define to the sub-directory where libtool stores uninstalled libraries. */
|
||||
#define LT_OBJDIR ".libs/"
|
||||
|
||||
/* Name of package */
|
||||
#define PACKAGE "numactl"
|
||||
|
||||
/* Define to the address where bug reports for this package should be sent. */
|
||||
#define PACKAGE_BUGREPORT ""
|
||||
|
||||
/* Define to the full name of this package. */
|
||||
#define PACKAGE_NAME "numactl"
|
||||
|
||||
/* Define to the full name and version of this package. */
|
||||
#define PACKAGE_STRING "numactl 2.1"
|
||||
|
||||
/* Define to the one symbol short name of this package. */
|
||||
#define PACKAGE_TARNAME "numactl"
|
||||
|
||||
/* Define to the home page for this package. */
|
||||
#define PACKAGE_URL ""
|
||||
|
||||
/* Define to the version of this package. */
|
||||
#define PACKAGE_VERSION "2.1"
|
||||
|
||||
/* Define to 1 if all of the C89 standard headers exist (not just the ones
|
||||
required in a freestanding environment). This macro is provided for
|
||||
backward compatibility; new code need not use it. */
|
||||
#define STDC_HEADERS 1
|
||||
|
||||
/* If the compiler supports a TLS storage class define it to that here */
|
||||
#define TLS __thread
|
||||
|
||||
/* Version number of package */
|
||||
#define VERSION "2.1"
|
||||
|
||||
/* Number of bits in a file offset, on hosts where this is settable. */
|
||||
/* #undef _FILE_OFFSET_BITS */
|
||||
|
||||
/* Define to 1 on platforms where this makes off_t a 64-bit type. */
|
||||
/* #undef _LARGE_FILES */
|
||||
|
||||
/* Number of bits in time_t, on hosts where this is settable. */
|
||||
/* #undef _TIME_BITS */
|
||||
|
||||
/* Define to 1 on platforms where this makes time_t a 64-bit type. */
|
||||
/* #undef __MINGW_USE_VC2005_COMPAT */
|
2
contrib/openssl
vendored
2
contrib/openssl
vendored
@ -1 +1 @@
|
||||
Subproject commit ee2bb8513b28bf86b35404dd17a0e29305ca9e08
|
||||
Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
|
@ -1298,7 +1298,6 @@ elseif(ARCH_PPC64LE)
|
||||
${OPENSSL_SOURCE_DIR}/crypto/camellia/camellia.c
|
||||
${OPENSSL_SOURCE_DIR}/crypto/camellia/cmll_cbc.c
|
||||
${OPENSSL_SOURCE_DIR}/crypto/chacha/chacha_enc.c
|
||||
${OPENSSL_SOURCE_DIR}/crypto/mem_clr.c
|
||||
${OPENSSL_SOURCE_DIR}/crypto/rc4/rc4_enc.c
|
||||
${OPENSSL_SOURCE_DIR}/crypto/rc4/rc4_skey.c
|
||||
${OPENSSL_SOURCE_DIR}/crypto/sha/keccak1600.c
|
||||
|
2
contrib/orc
vendored
2
contrib/orc
vendored
@ -1 +1 @@
|
||||
Subproject commit 947cebaf9432d708253ac08dc3012daa6b4ede6f
|
||||
Subproject commit bcc025c09828c556f54cfbdf83a66b9acae7d17f
|
2
contrib/pocketfft
vendored
2
contrib/pocketfft
vendored
@ -1 +1 @@
|
||||
Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546
|
||||
Subproject commit f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3
|
34
contrib/prometheus-protobufs-cmake/CMakeLists.txt
Normal file
34
contrib/prometheus-protobufs-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,34 @@
|
||||
option(ENABLE_PROMETHEUS_PROTOBUFS "Enable Prometheus Protobufs" ${ENABLE_PROTOBUF})
|
||||
|
||||
if(NOT ENABLE_PROMETHEUS_PROTOBUFS)
|
||||
message(STATUS "Not using prometheus-protobufs")
|
||||
return()
|
||||
endif()
|
||||
|
||||
set(Protobuf_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/google-protobuf/src")
|
||||
set(Prometheus_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/prometheus-protobufs")
|
||||
set(GogoProto_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/prometheus-protobufs-gogo")
|
||||
|
||||
# Protobuf_IMPORT_DIRS specify where the protobuf compiler will look for .proto files.
|
||||
set(Old_Protobuf_IMPORT_DIRS ${Protobuf_IMPORT_DIRS})
|
||||
list(APPEND Protobuf_IMPORT_DIRS "${Protobuf_INCLUDE_DIR}" "${Prometheus_INCLUDE_DIR}" "${GogoProto_INCLUDE_DIR}")
|
||||
|
||||
PROTOBUF_GENERATE_CPP(prometheus_protobufs_sources prometheus_protobufs_headers
|
||||
"prompb/remote.proto"
|
||||
"prompb/types.proto"
|
||||
"gogoproto/gogo.proto"
|
||||
)
|
||||
|
||||
set(Protobuf_IMPORT_DIRS ${Old_Protobuf_IMPORT_DIRS})
|
||||
|
||||
# Ignore warnings while compiling protobuf-generated *.pb.h and *.pb.cpp files.
|
||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")
|
||||
|
||||
# Disable clang-tidy for protobuf-generated *.pb.h and *.pb.cpp files.
|
||||
set (CMAKE_CXX_CLANG_TIDY "")
|
||||
|
||||
add_library(_prometheus_protobufs ${prometheus_protobufs_sources} ${prometheus_protobufs_headers})
|
||||
target_include_directories(_prometheus_protobufs SYSTEM PUBLIC "${CMAKE_CURRENT_BINARY_DIR}")
|
||||
target_link_libraries (_prometheus_protobufs PUBLIC ch_contrib::protobuf)
|
||||
|
||||
add_library (ch_contrib::prometheus_protobufs ALIAS _prometheus_protobufs)
|
35
contrib/prometheus-protobufs-gogo/LICENSE
Normal file
35
contrib/prometheus-protobufs-gogo/LICENSE
Normal file
@ -0,0 +1,35 @@
|
||||
Copyright (c) 2022, The Cosmos SDK Authors. All rights reserved.
|
||||
Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
|
||||
Protocol Buffers for Go with Gadgets
|
||||
|
||||
Go support for Protocol Buffers - Google's data interchange format
|
||||
|
||||
Copyright 2010 The Go Authors. All rights reserved.
|
||||
https://github.com/golang/protobuf
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
4
contrib/prometheus-protobufs-gogo/README
Normal file
4
contrib/prometheus-protobufs-gogo/README
Normal file
@ -0,0 +1,4 @@
|
||||
File "gogoproto/gogo.proto" was downloaded from the "Protocol Buffers for Go with Gadgets" project:
|
||||
https://github.com/cosmos/gogoproto/blob/main/gogoproto/gogo.proto
|
||||
|
||||
File "gogoproto/gogo.proto" is used in ClickHouse to compile prometheus protobufs.
|
145
contrib/prometheus-protobufs-gogo/gogoproto/gogo.proto
Normal file
145
contrib/prometheus-protobufs-gogo/gogoproto/gogo.proto
Normal file
@ -0,0 +1,145 @@
|
||||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/cosmos/gogoproto
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto2";
package gogoproto;

import "google/protobuf/descriptor.proto";

option java_package = "com.google.protobuf";
option java_outer_classname = "GoGoProtos";
option go_package = "github.com/cosmos/gogoproto/gogoproto";

extend google.protobuf.EnumOptions {
    optional bool goproto_enum_prefix = 62001;
    optional bool goproto_enum_stringer = 62021;
    optional bool enum_stringer = 62022;
    optional string enum_customname = 62023;
    optional bool enumdecl = 62024;
}

extend google.protobuf.EnumValueOptions {
    optional string enumvalue_customname = 66001;
}

extend google.protobuf.FileOptions {
    optional bool goproto_getters_all = 63001;
    optional bool goproto_enum_prefix_all = 63002;
    optional bool goproto_stringer_all = 63003;
    optional bool verbose_equal_all = 63004;
    optional bool face_all = 63005;
    optional bool gostring_all = 63006;
    optional bool populate_all = 63007;
    optional bool stringer_all = 63008;
    optional bool onlyone_all = 63009;

    optional bool equal_all = 63013;
    optional bool description_all = 63014;
    optional bool testgen_all = 63015;
    optional bool benchgen_all = 63016;
    optional bool marshaler_all = 63017;
    optional bool unmarshaler_all = 63018;
    optional bool stable_marshaler_all = 63019;

    optional bool sizer_all = 63020;

    optional bool goproto_enum_stringer_all = 63021;
    optional bool enum_stringer_all = 63022;

    optional bool unsafe_marshaler_all = 63023;
    optional bool unsafe_unmarshaler_all = 63024;

    optional bool goproto_extensions_map_all = 63025;
    optional bool goproto_unrecognized_all = 63026;
    optional bool gogoproto_import = 63027;
    optional bool protosizer_all = 63028;
    optional bool compare_all = 63029;
    optional bool typedecl_all = 63030;
    optional bool enumdecl_all = 63031;

    optional bool goproto_registration = 63032;
    optional bool messagename_all = 63033;

    optional bool goproto_sizecache_all = 63034;
    optional bool goproto_unkeyed_all = 63035;
}

extend google.protobuf.MessageOptions {
    optional bool goproto_getters = 64001;
    optional bool goproto_stringer = 64003;
    optional bool verbose_equal = 64004;
    optional bool face = 64005;
    optional bool gostring = 64006;
    optional bool populate = 64007;
    optional bool stringer = 67008;
    optional bool onlyone = 64009;

    optional bool equal = 64013;
    optional bool description = 64014;
    optional bool testgen = 64015;
    optional bool benchgen = 64016;
    optional bool marshaler = 64017;
    optional bool unmarshaler = 64018;
    optional bool stable_marshaler = 64019;

    optional bool sizer = 64020;

    optional bool unsafe_marshaler = 64023;
    optional bool unsafe_unmarshaler = 64024;

    optional bool goproto_extensions_map = 64025;
    optional bool goproto_unrecognized = 64026;

    optional bool protosizer = 64028;
    optional bool compare = 64029;

    optional bool typedecl = 64030;

    optional bool messagename = 64033;

    optional bool goproto_sizecache = 64034;
    optional bool goproto_unkeyed = 64035;
}

extend google.protobuf.FieldOptions {
    optional bool nullable = 65001;
    optional bool embed = 65002;
    optional string customtype = 65003;
    optional string customname = 65004;
    optional string jsontag = 65005;
    optional string moretags = 65006;
    optional string casttype = 65007;
    optional string castkey = 65008;
    optional string castvalue = 65009;

    optional bool stdtime = 65010;
    optional bool stdduration = 65011;
    optional bool wktpointer = 65012;

    optional string castrepeated = 65013;
}
201
contrib/prometheus-protobufs/LICENSE
Normal file
201
contrib/prometheus-protobufs/LICENSE
Normal file
@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
2
contrib/prometheus-protobufs/README
Normal file
2
contrib/prometheus-protobufs/README
Normal file
@ -0,0 +1,2 @@
Files "prompb/remote.proto" and "prompb/types.proto" were downloaded from the Prometheus repository:
https://github.com/prometheus/prometheus/tree/main/prompb
88
contrib/prometheus-protobufs/prompb/remote.proto
Normal file
88
contrib/prometheus-protobufs/prompb/remote.proto
Normal file
@ -0,0 +1,88 @@
// Copyright 2016 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

import "prompb/types.proto";
import "gogoproto/gogo.proto";

message WriteRequest {
  repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false];
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false];
}

// ReadRequest represents a remote read request.
message ReadRequest {
  repeated Query queries = 1;

  enum ResponseType {
    // Server will return a single ReadResponse message with matched series that includes list of raw samples.
    // It's recommended to use streamed response types instead.
    //
    // Response headers:
    // Content-Type: "application/x-protobuf"
    // Content-Encoding: "snappy"
    SAMPLES = 0;
    // Server will stream a delimited ChunkedReadResponse message that
    // contains XOR or HISTOGRAM(!) encoded chunks for a single series.
    // Each message is following varint size and fixed size bigendian
    // uint32 for CRC32 Castagnoli checksum.
    //
    // Response headers:
    // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
    // Content-Encoding: ""
    STREAMED_XOR_CHUNKS = 1;
  }

  // accepted_response_types allows negotiating the content type of the response.
  //
  // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
  // implemented by server, error is returned.
  // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
  repeated ResponseType accepted_response_types = 2;
}
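
The accepted_response_types comment above describes a simple first-match negotiation. Below is a minimal sketch of that rule in Python, using only the enum values declared in this file; the function and variable names are ours and not part of the protocol.

from enum import IntEnum

class ResponseType(IntEnum):
    # Mirrors the ResponseType enum above.
    SAMPLES = 0
    STREAMED_XOR_CHUNKS = 1

def negotiate(accepted: list, supported: set) -> ResponseType:
    # First accepted type the server implements wins (FIFO order); an empty
    # list falls back to SAMPLES; no supported entry at all is an error.
    if not accepted:
        return ResponseType.SAMPLES
    for response_type in accepted:
        if response_type in supported:
            return response_type
    raise ValueError("no accepted_response_types is implemented by the server")

# A server that implements both types honours the client's first preference.
print(negotiate([ResponseType.STREAMED_XOR_CHUNKS, ResponseType.SAMPLES],
                {ResponseType.SAMPLES, ResponseType.STREAMED_XOR_CHUNKS}))
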
// ReadResponse is a response when response_type equals SAMPLES.
message ReadResponse {
  // In same order as the request's queries.
  repeated QueryResult results = 1;
}

message Query {
  int64 start_timestamp_ms = 1;
  int64 end_timestamp_ms = 2;
  repeated prometheus.LabelMatcher matchers = 3;
  prometheus.ReadHints hints = 4;
}

message QueryResult {
  // Samples within a time series must be ordered by time.
  repeated prometheus.TimeSeries timeseries = 1;
}

// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
message ChunkedReadResponse {
  repeated prometheus.ChunkedSeries chunked_series = 1;

  // query_index represents an index of the query from ReadRequest.queries these chunks relates to.
  int64 query_index = 2;
}
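
The STREAMED_XOR_CHUNKS comment above says each frame carries a varint size and a big-endian uint32 CRC32 Castagnoli checksum. Here is a minimal reader sketch in Python, assuming the frame layout is: varint payload length, then the 4-byte checksum, then the payload. That ordering and all names below are our assumptions; only the varint, CRC-32C, and big-endian details come from the comment.

import struct
from typing import BinaryIO, Iterator

def crc32c(data: bytes) -> int:
    # Bitwise CRC-32C (Castagnoli), reflected polynomial 0x82F63B78; slow but dependency-free.
    crc = 0xFFFFFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1))
    return crc ^ 0xFFFFFFFF

def read_uvarint(stream: BinaryIO) -> int:
    # Protobuf-style base-128 varint, least significant group first.
    shift = 0
    value = 0
    while True:
        chunk = stream.read(1)
        if not chunk:
            raise EOFError("stream ended inside a varint")
        value |= (chunk[0] & 0x7F) << shift
        if not chunk[0] & 0x80:
            return value
        shift += 7

def read_frames(stream: BinaryIO) -> Iterator[bytes]:
    # Yields raw ChunkedReadResponse payloads; decoding them would need the
    # classes generated from this .proto file, which is out of scope here.
    while True:
        try:
            size = read_uvarint(stream)
        except EOFError:
            return
        (checksum,) = struct.unpack(">I", stream.read(4))
        payload = stream.read(size)
        if crc32c(payload) != checksum:
            raise ValueError("CRC-32C mismatch in a chunked read response frame")
        yield payload
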
187
contrib/prometheus-protobufs/prompb/types.proto
Normal file
187
contrib/prometheus-protobufs/prompb/types.proto
Normal file
@ -0,0 +1,187 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

import "gogoproto/gogo.proto";

message MetricMetadata {
  enum MetricType {
    UNKNOWN = 0;
    COUNTER = 1;
    GAUGE = 2;
    HISTOGRAM = 3;
    GAUGEHISTOGRAM = 4;
    SUMMARY = 5;
    INFO = 6;
    STATESET = 7;
  }

  // Represents the metric type, these match the set from Prometheus.
  // Refer to github.com/prometheus/common/model/metadata.go for details.
  MetricType type = 1;
  string metric_family_name = 2;
  string help = 4;
  string unit = 5;
}

message Sample {
  double value = 1;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 2;
}
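
The timestamp comment above fixes the unit to milliseconds. A one-line conversion sketch in Python, assuming the usual Prometheus convention of milliseconds since the Unix epoch; the helper name is ours.

from datetime import datetime, timezone

def to_prometheus_timestamp(moment: datetime) -> int:
    # Milliseconds since the Unix epoch, the format Sample.timestamp expects.
    return int(moment.timestamp() * 1000)

print(to_prometheus_timestamp(datetime(2024, 1, 1, tzinfo=timezone.utc)))  # 1704067200000
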
message Exemplar {
  // Optional, can be empty.
  repeated Label labels = 1 [(gogoproto.nullable) = false];
  double value = 2;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 3;
}

// A native histogram, also known as a sparse histogram.
// Original design doc:
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
// The appendix of this design doc also explains the concept of float
// histograms. This Histogram message can represent both, the usual
// integer histogram as well as a float histogram.
message Histogram {
  enum ResetHint {
    UNKNOWN = 0; // Need to test for a counter reset explicitly.
    YES = 1;     // This is the 1st histogram after a counter reset.
    NO = 2;      // There was no counter reset between this and the previous Histogram.
    GAUGE = 3;   // This is a gauge histogram where counter resets don't happen.
  }

  oneof count { // Count of observations in the histogram.
    uint64 count_int = 1;
    double count_float = 2;
  }
  double sum = 3; // Sum of observations in the histogram.
  // The schema defines the bucket schema. Currently, valid numbers
  // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
  // is a bucket boundary in each case, and then each power of two is
  // divided into 2^n logarithmic buckets. Or in other words, each
  // bucket boundary is the previous boundary times 2^(2^-n). In the
  // future, more bucket schemas may be added using numbers < -4 or >
  // 8.
  sint32 schema = 4;
  double zero_threshold = 5; // Breadth of the zero bucket.
  oneof zero_count { // Count in zero bucket.
    uint64 zero_count_int = 6;
    double zero_count_float = 7;
  }

  // Negative Buckets.
  repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
  // Use either "negative_deltas" or "negative_counts", the former for
  // regular histograms with integer counts, the latter for float
  // histograms.
  repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double negative_counts = 10; // Absolute count of each bucket.

  // Positive Buckets.
  repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
  // Use either "positive_deltas" or "positive_counts", the former for
  // regular histograms with integer counts, the latter for float
  // histograms.
  repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double positive_counts = 13; // Absolute count of each bucket.

  ResetHint reset_hint = 14;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 15;
}
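
The schema comment above packs the bucket geometry into one sentence: with schema n, every power of two is split into 2^n buckets, so consecutive boundaries differ by a factor of 2^(2^-n). A small Python sketch of that arithmetic, plus the running sum that the *_deltas comments describe; all names are ours, only the formulas come from the comments.

def bucket_upper_bound(schema: int, index: int) -> float:
    # With 1 as a boundary and a growth factor of 2^(2^-schema), boundary
    # number `index` sits at 2^(index * 2^-schema).
    return 2.0 ** (index * 2.0 ** -schema)

# For schema = 3 there are 8 buckets per power of two, so boundary 8 is exactly 2.0.
print([round(bucket_upper_bound(3, i), 4) for i in range(9)])

def counts_from_deltas(deltas: list) -> list:
    # positive_deltas / negative_deltas store each bucket count relative to the
    # previous bucket (or to zero for the first one); a running sum restores them.
    counts = []
    running = 0
    for delta in deltas:
        running += delta
        counts.append(running)
    return counts

print(counts_from_deltas([3, 0, -1, 2]))  # [3, 3, 2, 4]
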
// A BucketSpan defines a number of consecutive buckets with their
// offset. Logically, it would be more straightforward to include the
// bucket counts in the Span. However, the protobuf representation is
// more compact in the way the data is structured here (with all the
// buckets in a single array separate from the Spans).
message BucketSpan {
  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
  uint32 length = 2; // Length of consecutive buckets.
}
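
The BucketSpan comment above explains why counts live in one flat array with spans describing where they sit. Here is a sketch of how a span list and a delta list combine into absolute bucket indexes, assuming the offset of the first span is the starting index and later offsets are gaps measured from the end of the previous span; that reading, and all names, are ours.

def expand_buckets(spans, deltas):
    # spans: (offset, length) pairs as in BucketSpan; deltas: one entry per
    # bucket across all spans, as in positive_deltas. Returns {index: count}.
    result = {}
    index = 0
    count = 0
    deltas_iter = iter(deltas)
    for offset, length in spans:
        index += offset
        for _ in range(length):
            count += next(deltas_iter)
            result[index] = count
            index += 1
    return result

# Buckets 0..1, then a gap of three empty buckets, then buckets 5..6.
print(expand_buckets([(0, 2), (3, 2)], [2, 1, -1, 0]))  # {0: 2, 1: 3, 5: 2, 6: 2}
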
// TimeSeries represents samples and labels for a single time series.
message TimeSeries {
  // For a timeseries to be valid, and for the samples and exemplars
  // to be ingested by the remote system properly, the labels field is required.
  repeated Label labels = 1 [(gogoproto.nullable) = false];
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}

message Label {
  string name = 1;
  string value = 2;
}

message Labels {
  repeated Label labels = 1 [(gogoproto.nullable) = false];
}

// Matcher specifies a rule, which can match or set of labels or not.
message LabelMatcher {
  enum Type {
    EQ = 0;
    NEQ = 1;
    RE = 2;
    NRE = 3;
  }
  Type type = 1;
  string name = 2;
  string value = 3;
}
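
LabelMatcher above only declares the four matcher kinds. A small sketch of the conventional reading of them against a label set follows; the full-string regex anchoring and all names are our assumptions, not stated in this file.

import re

def label_matches(kind: str, name: str, value: str, labels: dict) -> bool:
    # EQ/NEQ compare the label value literally; RE/NRE match it against a regex.
    actual = labels.get(name, "")
    if kind == "EQ":
        return actual == value
    if kind == "NEQ":
        return actual != value
    if kind == "RE":
        return re.fullmatch(value, actual) is not None
    if kind == "NRE":
        return re.fullmatch(value, actual) is None
    raise ValueError(f"unknown matcher type: {kind}")

print(label_matches("RE", "job", "node.*", {"job": "node_exporter"}))  # True
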
message ReadHints {
  int64 step_ms = 1; // Query step size in milliseconds.
  string func = 2; // String representation of surrounding function or aggregation.
  int64 start_ms = 3; // Start time in milliseconds.
  int64 end_ms = 4; // End time in milliseconds.
  repeated string grouping = 5; // List of label names used in aggregation.
  bool by = 6; // Indicate whether it is without or by.
  int64 range_ms = 7; // Range vector selector range in milliseconds.
}

// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
message Chunk {
  int64 min_time_ms = 1;
  int64 max_time_ms = 2;

  // We require this to match chunkenc.Encoding.
  enum Encoding {
    UNKNOWN = 0;
    XOR = 1;
    HISTOGRAM = 2;
    FLOAT_HISTOGRAM = 3;
  }
  Encoding type = 3;
  bytes data = 4;
}

// ChunkedSeries represents single, encoded time series.
message ChunkedSeries {
  // Labels should be sorted.
  repeated Label labels = 1 [(gogoproto.nullable) = false];
  // Chunks will be in start time order and may overlap.
  repeated Chunk chunks = 2 [(gogoproto.nullable) = false];
}
2
contrib/qpl
vendored
2
contrib/qpl
vendored
@ -1 +1 @@
Subproject commit d4715e0e79896b85612158e135ee1a85f3b3e04d
Subproject commit c2ced94c53c1ee22191201a59878e9280bc9b9b8
Some files were not shown because too many files have changed in this diff.