Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 07:01:59 +00:00)

Commit ed6d6056f2: Merge remote-tracking branch 'upstream/master' into HEAD
@@ -16,3 +16,6 @@

# Applied Black formatter for Python code
e6f5a3f98b21ba99cf274a9833797889e020a2b3

# Enabling clang-tidy readability-else-no-return rule
67c1e89d90ef576e62f8b1c68269742a3c6f9b1e
2  .github/ISSUE_TEMPLATE/20_feature-request.md  vendored
@@ -15,7 +15,7 @@ assignees: ''

**Use case**

> A clear and concise description of what is the intended usage scenario is.
> A clear and concise description of what the intended usage scenario is.

**Describe the solution you'd like**
5  .github/PULL_REQUEST_TEMPLATE.md  vendored
@@ -12,7 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
- Critical Bug Fix (crash, data loss, RBAC)
- Bug Fix (user-visible misbehavior in an official stable release)
- CI Fix or Improvement (changelog entry is not required)
- Not for changelog (changelog entry is not required)
@@ -59,6 +59,9 @@ At a minimum, the following information should be added (but add more as needed)
- [ ] <!---ci_exclude_tsan|msan|ubsan|coverage--> Exclude: All with TSAN, MSAN, UBSAN, Coverage
- [ ] <!---ci_exclude_aarch64|release|debug--> Exclude: All with aarch64, release, debug
---
- [ ] <!---ci_include_fuzzer--> Run only fuzzers related jobs (libFuzzer fuzzers, AST fuzzers, etc.)
- [ ] <!---ci_exclude_ast--> Exclude: AST fuzzers
---
- [ ] <!---do_not_test--> Do not test
- [ ] <!---woolen_wolfdog--> Woolen Wolfdog
- [ ] <!---upload_all--> Upload binaries for special builds
1  .github/actionlint.yml  vendored
@@ -4,7 +4,6 @@ self-hosted-runner:
- func-tester
- func-tester-aarch64
- fuzzer-unit-tester
- stress-tester
- style-checker
- style-checker-aarch64
- release-maker
20  .github/actions/clean/action.yml  vendored
@@ -1,11 +1,23 @@
name: Clean runner
description: Clean the runner's temp path on ending
inputs:
images:
description: clean docker images
default: false
type: boolean
runs:
using: "composite"
steps:
- name: Clean
- name: Clean Temp
shell: bash
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "${{runner.temp}}"
sudo rm -fr "${{runner.temp}}"
- name: Clean Docker Containers
shell: bash
run: |
docker rm -vf $(docker ps -aq) ||:
- name: Clean Docker Images
if: ${{ inputs.images }}
shell: bash
run: |
docker rmi -f $(docker images -aq) ||:
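The cleanup steps above lean on two small shell idioms: `||:` (shorthand for `|| true`, so a failing cleanup command never fails the step) and `xargs --no-run-if-empty` (skip the command entirely when there is nothing to clean). A minimal standalone sketch of the same pattern follows; the temp path is a made-up stand-in for `${{runner.temp}}`:

```bash
#!/usr/bin/env bash
set -u

# Kill and remove any running containers; '||:' is a no-op fallback, so an
# empty container list or an already-stopped container does not fail the script.
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:

# Hypothetical temp path standing in for the runner's temp directory.
TEMP_DIR="/tmp/runner-temp"
sudo rm -fr "$TEMP_DIR"
```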
34  .github/actions/debug/action.yml  vendored  Normal file
@@ -0,0 +1,34 @@
name: DebugInfo
description: Prints workflow debug info

runs:
using: "composite"
steps:
- name: Envs, event.json and contexts
shell: bash
run: |
echo '::group::Environment variables'
env | sort
echo '::endgroup::'

echo '::group::event.json'
python3 -m json.tool "$GITHUB_EVENT_PATH"
echo '::endgroup::'

cat << 'EOF'
::group::github context
${{ toJSON(github) }}
::endgroup::

::group::env context
${{ toJSON(env) }}
::endgroup::

::group::runner context
${{ toJSON(runner) }}
::endgroup::

::group::job context
${{ toJSON(job) }}
::endgroup::
EOF
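The `::group::`/`::endgroup::` strings printed by this action are GitHub Actions workflow commands: anything echoed between them is collapsed into a foldable section in the job log. A small sketch of the same technique (the group names are illustrative, not taken from the action above):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Everything printed between ::group:: and ::endgroup:: is collapsed
# into one expandable block in the GitHub Actions log viewer.
echo '::group::Environment variables'
env | sort
echo '::endgroup::'

echo '::group::Event payload'
# GITHUB_EVENT_PATH is set by the runner; pretty-print it if present.
if [[ -n "${GITHUB_EVENT_PATH:-}" && -f "$GITHUB_EVENT_PATH" ]]; then
    python3 -m json.tool "$GITHUB_EVENT_PATH"
fi
echo '::endgroup::'
```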
99  .github/workflows/auto_releases.yml  vendored  Normal file
@@ -0,0 +1,99 @@
name: AutoReleases

env:
PYTHONUNBUFFERED: 1

concurrency:
group: autoreleases

on:
# schedule:
# - cron: '0 9 * * *'
workflow_dispatch:
inputs:
dry-run:
description: 'Dry run'
required: false
default: false
type: boolean

jobs:
AutoReleaseInfo:
runs-on: [self-hosted, release-maker]
outputs:
data: ${{ steps.info.outputs.AUTO_RELEASE_PARAMS }}
dry_run: ${{ steps.info.outputs.DRY_RUN }}
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
EOF
echo "DRY_RUN=true" >> "$GITHUB_ENV"
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
fetch-depth: 0 # full history needed
- name: Debug Info
uses: ./.github/actions/debug
- name: Prepare Info
id: info
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 auto_release.py --prepare
echo "::group::Auto Release Info"
python3 -m json.tool /tmp/autorelease_info.json
echo "::endgroup::"
{
echo 'AUTO_RELEASE_PARAMS<<EOF'
cat /tmp/autorelease_params.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
if [[ "${{ github.event_name }}" == "schedule" ]]; then
echo "DRY_RUN=true" >> "$GITHUB_OUTPUT"
else
echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_OUTPUT"
fi
- name: Post Release Branch statuses
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 auto_release.py --post-status
- name: Clean up
uses: ./.github/actions/clean

Releases:
needs: AutoReleaseInfo
strategy:
matrix:
release_params: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases }}
max-parallel: 1
name: Release ${{ matrix.release_params.release_branch }}
uses: ./.github/workflows/create_release.yml
with:
ref: ${{ matrix.release_params.commit_sha }}
type: patch
dry-run: ${{ fromJson(needs.AutoReleaseInfo.outputs.dry_run) }}
secrets:
ROBOT_CLICKHOUSE_COMMIT_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}

CleanUp:
needs: [Releases]
runs-on: [self-hosted, release-maker]
steps:
- uses: ./.github/actions/clean
with:
images: true

# PostSlackMessage:
# needs: [Releases]
# runs-on: [self-hosted, release-maker]
# if: ${{ !cancelled() }}
# steps:
# - name: Check out repository code
# uses: ClickHouse/checkout@v1
# - name: Post
# run: |
# cd "$GITHUB_WORKSPACE/tests/ci"
# python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
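The `Prepare Info` step above uses the multiline-value syntax for `$GITHUB_OUTPUT`: a `name<<DELIMITER` header, the raw value lines, then the delimiter on its own line. A hedged standalone sketch of the pattern (the file names below are placeholders, not the real autorelease artifacts):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Placeholder JSON standing in for /tmp/autorelease_params.json.
PARAMS_FILE="/tmp/params_example.json"
echo '{"releases": []}' > "$PARAMS_FILE"

# Outside Actions, GITHUB_OUTPUT may be unset; fall back to a local file.
GITHUB_OUTPUT="${GITHUB_OUTPUT:-/tmp/github_output_example}"

# Multiline output: name<<DELIMITER, the value lines, then the delimiter.
{
    echo 'AUTO_RELEASE_PARAMS<<EOF'
    cat "$PARAMS_FILE"
    echo 'EOF'
} >> "$GITHUB_OUTPUT"

# Single-line outputs use the plain name=value form.
echo "DRY_RUN=true" >> "$GITHUB_OUTPUT"
```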
23  .github/workflows/backport_branches.yml  vendored
@@ -27,6 +27,8 @@ jobs:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Debug Info
uses: ./.github/actions/debug
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -227,18 +229,26 @@
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsRelease:
needs: [RunConfig, BuilderDebRelease]
IntegrationTestsAsanOldAnalyzer:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
test_name: Integration tests (asan, old analyzer)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsTsan:
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !cancelled() }}
@@ -248,7 +258,8 @@ jobs:
- FunctionalStatelessTestAsan
- FunctionalStatefulTestDebug
- StressTestTsan
- IntegrationTestsRelease
- IntegrationTestsTsan
- IntegrationTestsAsanOldAnalyzer
- CompatibilityCheckX86
- CompatibilityCheckAarch64
runs-on: [self-hosted, style-checker]
2  .github/workflows/cherry_pick.yml  vendored
@@ -33,6 +33,8 @@ jobs:
clear-repository: true
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
- name: Debug Info
uses: ./.github/actions/debug
- name: Cherry pick
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
33  .github/workflows/create_release.yml  vendored
@@ -2,6 +2,10 @@ name: CreateRelease

concurrency:
group: release

env:
PYTHONUNBUFFERED: 1

'on':
workflow_dispatch:
inputs:
@@ -26,6 +30,28 @@ concurrency:
required: false
default: false
type: boolean
workflow_call:
inputs:
ref:
description: 'Git reference (branch or commit sha) from which to create the release'
required: true
type: string
type:
description: 'The type of release: "new" for a new release or "patch" for a patch release'
required: true
type: string
only-repo:
description: 'Run only repos updates including docker (repo-recovery, tests)'
required: false
default: false
type: boolean
dry-run:
description: 'Dry run'
required: false
default: false
type: boolean
secrets:
ROBOT_CLICKHOUSE_COMMIT_TOKEN:

jobs:
CreateRelease:
@@ -33,13 +59,13 @@ jobs:
GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
runs-on: [self-hosted, release-maker]
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
- name: Debug Info
uses: ./.github/actions/debug
- name: Prepare Release Info
shell: bash
run: |
@@ -101,6 +127,7 @@ jobs:
--volume=".:/wd" --workdir="/wd" \
clickhouse/style-test \
./tests/ci/changelog.py -v --debug-helpers \
--gh-user-or-token ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} \
--jobs=5 \
--output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
@@ -129,9 +156,9 @@ jobs:
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-completed
git reset --hard HEAD
git checkout "$GITHUB_REF_NAME"
python3 ./tests/ci/create_release.py --set-progress-completed
- name: Create GH Release
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
shell: bash
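The changelog step above runs `tests/ci/changelog.py` inside the `clickhouse/style-test` image with the checked-out repository mounted as the working directory, so the script sees the same toolchain as CI. A simplified sketch of that invocation (the tag value is a placeholder; the real workflow passes `${{ env.RELEASE_TAG }}` and a GitHub token via `--gh-user-or-token`):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Placeholder release tag; the workflow derives the real one from the release info.
RELEASE_TAG="v24.8.1.1-example"

# Mount the repository at /wd and run the changelog generator from inside the image.
docker run --rm --volume ".:/wd" --workdir "/wd" clickhouse/style-test \
    ./tests/ci/changelog.py -v --debug-helpers \
    --jobs=5 \
    --output="./docs/changelogs/${RELEASE_TAG}.md" "${RELEASE_TAG}"
```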
5  .github/workflows/docker_test_images.yml  vendored
@ -1,4 +1,5 @@
|
||||
name: Build docker images
|
||||
|
||||
'on':
|
||||
workflow_call:
|
||||
inputs:
|
||||
@ -11,6 +12,10 @@ name: Build docker images
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
env:
|
||||
PYTHONUNBUFFERED: 1
|
||||
|
||||
jobs:
|
||||
DockerBuildAarch64:
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
9  .github/workflows/jepsen.yml  vendored
@ -8,27 +8,28 @@ on: # yamllint disable-line rule:truthy
|
||||
schedule:
|
||||
- cron: '0 */6 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
RunConfig:
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
outputs:
|
||||
data: ${{ steps.runconfig.outputs.CI_DATA }}
|
||||
steps:
|
||||
- name: DebugInfo
|
||||
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true # to ensure correct digests
|
||||
fetch-depth: 0 # to get version
|
||||
filter: tree:0
|
||||
- name: Debug Info
|
||||
uses: ./.github/actions/debug
|
||||
- name: PrepareRunConfig
|
||||
id: runconfig
|
||||
run: |
|
||||
echo "::group::configure CI run"
|
||||
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow "$GITHUB_WORKFLOW" --outfile ${{ runner.temp }}/ci_run_data.json
|
||||
echo "::endgroup::"
|
||||
|
||||
|
||||
echo "::group::CI run configure results"
|
||||
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
|
||||
echo "::endgroup::"
|
||||
@ -67,7 +68,7 @@ jobs:
|
||||
if: ${{ !cancelled() }}
|
||||
run: |
|
||||
export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
|
||||
cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
|
||||
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
|
||||
${{ toJson(needs) }}
|
||||
EOF
|
||||
python3 ./tests/ci/ci_buddy.py --check-wf-status
|
||||
|
4
.github/workflows/master.yml
vendored
4
.github/workflows/master.yml
vendored
@ -15,14 +15,14 @@ jobs:
|
||||
outputs:
|
||||
data: ${{ steps.runconfig.outputs.CI_DATA }}
|
||||
steps:
|
||||
- name: DebugInfo
|
||||
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true # to ensure correct digests
|
||||
fetch-depth: 0 # to get version
|
||||
filter: tree:0
|
||||
- name: Debug Info
|
||||
uses: ./.github/actions/debug
|
||||
- name: Merge sync PR
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
11  .github/workflows/merge_queue.yml  vendored
@ -14,14 +14,14 @@ jobs:
|
||||
outputs:
|
||||
data: ${{ steps.runconfig.outputs.CI_DATA }}
|
||||
steps:
|
||||
- name: DebugInfo
|
||||
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true # to ensure correct digests
|
||||
fetch-depth: 0 # to get a version
|
||||
filter: tree:0
|
||||
- name: Debug Info
|
||||
uses: ./.github/actions/debug
|
||||
- name: Cancel PR workflow
|
||||
run: |
|
||||
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
|
||||
@ -58,13 +58,8 @@ jobs:
|
||||
test_name: Style check
|
||||
runner_type: style-checker-aarch64
|
||||
run_command: |
|
||||
python3 style_check.py
|
||||
python3 style_check.py --no-push
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
secrets:
|
||||
secret_envs: |
|
||||
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
|
||||
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
|
||||
RCSK
|
||||
FastTest:
|
||||
needs: [RunConfig, BuildDockers]
|
||||
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
|
||||
4  .github/workflows/nightly.yml  vendored
@ -15,14 +15,14 @@ jobs:
|
||||
outputs:
|
||||
data: ${{ steps.runconfig.outputs.CI_DATA }}
|
||||
steps:
|
||||
- name: DebugInfo
|
||||
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true # to ensure correct digests
|
||||
fetch-depth: 0 # to get version
|
||||
filter: tree:0
|
||||
- name: Debug Info
|
||||
uses: ./.github/actions/debug
|
||||
- name: PrepareRunConfig
|
||||
id: runconfig
|
||||
run: |
|
||||
12  .github/workflows/pull_request.yml  vendored
@ -25,17 +25,14 @@ jobs:
|
||||
outputs:
|
||||
data: ${{ steps.runconfig.outputs.CI_DATA }}
|
||||
steps:
|
||||
- name: DebugInfo
|
||||
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true # to ensure correct digests
|
||||
fetch-depth: 0 # to get a version
|
||||
filter: tree:0
|
||||
- name: Cancel previous Sync PR workflow
|
||||
run: |
|
||||
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
|
||||
- name: Debug Info
|
||||
uses: ./.github/actions/debug
|
||||
- name: Set pending Sync status
|
||||
run: |
|
||||
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --set-pending-status
|
||||
@ -82,10 +79,7 @@ jobs:
|
||||
python3 style_check.py
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
secrets:
|
||||
secret_envs: |
|
||||
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
|
||||
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
|
||||
RCSK
|
||||
robot_git_token: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
|
||||
FastTest:
|
||||
needs: [RunConfig, BuildDockers, StyleCheck]
|
||||
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
|
||||
24  .github/workflows/release_branches.yml  vendored
@ -24,6 +24,8 @@ jobs:
|
||||
clear-repository: true # to ensure correct digests
|
||||
fetch-depth: 0 # to get version
|
||||
filter: tree:0
|
||||
- name: Debug Info
|
||||
uses: ./.github/actions/debug
|
||||
- name: Labels check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
@ -130,6 +132,7 @@ jobs:
|
||||
with:
|
||||
build_name: package_debug
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
force: true
|
||||
BuilderBinDarwin:
|
||||
needs: [RunConfig, BuildDockers]
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
@ -371,7 +374,7 @@ jobs:
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Stress test (asan)
|
||||
runner_type: stress-tester
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
StressTestTsan:
|
||||
needs: [RunConfig, BuilderDebTsan]
|
||||
@ -379,7 +382,7 @@ jobs:
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Stress test (tsan)
|
||||
runner_type: stress-tester
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
StressTestMsan:
|
||||
needs: [RunConfig, BuilderDebMsan]
|
||||
@ -387,7 +390,7 @@ jobs:
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Stress test (msan)
|
||||
runner_type: stress-tester
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
StressTestUBsan:
|
||||
needs: [RunConfig, BuilderDebUBsan]
|
||||
@ -395,7 +398,7 @@ jobs:
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Stress test (ubsan)
|
||||
runner_type: stress-tester
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
StressTestDebug:
|
||||
needs: [RunConfig, BuilderDebDebug]
|
||||
@ -403,7 +406,7 @@ jobs:
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Stress test (debug)
|
||||
runner_type: stress-tester
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
#############################################################################################
|
||||
############################# INTEGRATION TESTS #############################################
|
||||
@ -414,7 +417,7 @@ jobs:
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Integration tests (asan)
|
||||
runner_type: stress-tester
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
IntegrationTestsAnalyzerAsan:
|
||||
needs: [RunConfig, BuilderDebAsan]
|
||||
@ -422,7 +425,7 @@ jobs:
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Integration tests (asan, old analyzer)
|
||||
runner_type: stress-tester
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
IntegrationTestsTsan:
|
||||
needs: [RunConfig, BuilderDebTsan]
|
||||
@ -430,7 +433,7 @@ jobs:
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Integration tests (tsan)
|
||||
runner_type: stress-tester
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
IntegrationTestsRelease:
|
||||
needs: [RunConfig, BuilderDebRelease]
|
||||
@ -438,7 +441,7 @@ jobs:
|
||||
uses: ./.github/workflows/reusable_test.yml
|
||||
with:
|
||||
test_name: Integration tests (release)
|
||||
runner_type: stress-tester
|
||||
runner_type: func-tester
|
||||
data: ${{ needs.RunConfig.outputs.data }}
|
||||
FinishCheck:
|
||||
if: ${{ !cancelled() }}
|
||||
@ -482,7 +485,7 @@ jobs:
|
||||
if: ${{ !failure() }}
|
||||
run: |
|
||||
# update overall ci report
|
||||
python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
|
||||
python3 ./tests/ci/finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
|
||||
- name: Check Workflow results
|
||||
if: ${{ !cancelled() }}
|
||||
run: |
|
||||
@ -490,5 +493,4 @@ jobs:
|
||||
cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
|
||||
${{ toJson(needs) }}
|
||||
EOF
|
||||
|
||||
python3 ./tests/ci/ci_buddy.py --check-wf-status
|
||||
17  .github/workflows/reusable_build.yml  vendored
@ -34,8 +34,11 @@ name: Build ClickHouse
|
||||
description: additional ENV variables to setup the job
|
||||
type: string
|
||||
secrets:
|
||||
secret_envs:
|
||||
description: if given, it's passed to the environments
|
||||
robot_git_token:
|
||||
required: false
|
||||
ci_db_url:
|
||||
required: false
|
||||
ci_db_password:
|
||||
required: false
|
||||
|
||||
jobs:
|
||||
@ -58,10 +61,18 @@ jobs:
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
${{inputs.additional_envs}}
|
||||
${{secrets.secret_envs}}
|
||||
DOCKER_TAG<<DOCKER_JSON
|
||||
${{ toJson(fromJson(inputs.data).docker_data.images) }}
|
||||
DOCKER_JSON
|
||||
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
|
||||
${{secrets.robot_git_token}}
|
||||
RCSK
|
||||
CI_DB_URL<<CIDBU
|
||||
${{ secrets.ci_db_url }}
|
||||
CIDBU
|
||||
CI_DB_PASSWORD<<CIDBP
|
||||
${{ secrets.ci_db_password }}
|
||||
CIDBP
|
||||
EOF
|
||||
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
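The env block above feeds multi-line values (a docker image map, an SSH key, CI database credentials) into `$GITHUB_ENV` using the same `name<<DELIMITER` syntax seen earlier, with a distinct delimiter per value so one payload cannot terminate another block early. A hedged sketch with placeholder values:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Outside Actions, GITHUB_ENV may be unset; fall back to a local file.
GITHUB_ENV="${GITHUB_ENV:-/tmp/github_env_example}"

# Each value gets its own delimiter (DOCKER_JSON, RCSK), mirroring the workflow;
# the payloads below are placeholders, not real images or secrets.
cat >> "$GITHUB_ENV" << 'EOF'
DOCKER_TAG<<DOCKER_JSON
{"clickhouse/binary-builder": "latest"}
DOCKER_JSON
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
(placeholder for the private key passed in via secrets)
RCSK
EOF
```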
|
||||
15  .github/workflows/reusable_build_stage.yml  vendored
@ -1,7 +1,11 @@
|
||||
### FIXME: merge reusable_test.yml and reusable_build.yml as they are almost identical
|
||||
# and then merge reusable_build_stage.yml and reusable_test_stage.yml
|
||||
|
||||
env:
|
||||
PYTHONUNBUFFERED: 1
|
||||
|
||||
name: BuildStageWF
|
||||
|
||||
'on':
|
||||
workflow_call:
|
||||
inputs:
|
||||
@ -14,8 +18,11 @@ name: BuildStageWF
|
||||
type: string
|
||||
required: true
|
||||
secrets:
|
||||
secret_envs:
|
||||
description: if given, it's passed to the environments
|
||||
robot_git_token:
|
||||
required: false
|
||||
ci_db_url:
|
||||
required: false
|
||||
ci_db_password:
|
||||
required: false
|
||||
|
||||
jobs:
|
||||
@ -35,4 +42,6 @@ jobs:
|
||||
checkout_depth: 0
|
||||
data: ${{ inputs.data }}
|
||||
secrets:
|
||||
secret_envs: ${{ secrets.secret_envs }}
|
||||
robot_git_token: ${{ secrets.robot_git_token }}
|
||||
ci_db_url: ${{ secrets.ci_db_url }}
|
||||
ci_db_password: ${{ secrets.ci_db_password }}
|
||||
21  .github/workflows/reusable_simple_job.yml  vendored
@ -45,8 +45,11 @@ name: Simple job
|
||||
type: boolean
|
||||
default: false
|
||||
secrets:
|
||||
secret_envs:
|
||||
description: if given, it's passed to the environments
|
||||
robot_git_token:
|
||||
required: false
|
||||
ci_db_url:
|
||||
required: false
|
||||
ci_db_password:
|
||||
required: false
|
||||
|
||||
|
||||
@ -62,8 +65,6 @@ jobs:
|
||||
env:
|
||||
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
|
||||
steps:
|
||||
- name: DebugInfo
|
||||
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
@ -72,12 +73,22 @@ jobs:
|
||||
submodules: ${{inputs.submodules}}
|
||||
fetch-depth: ${{inputs.checkout_depth}}
|
||||
filter: tree:0
|
||||
- name: Debug Info
|
||||
uses: ./.github/actions/debug
|
||||
- name: Set build envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
CHECK_NAME=${{ inputs.test_name }}
|
||||
${{inputs.additional_envs}}
|
||||
${{secrets.secret_envs}}
|
||||
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
|
||||
${{secrets.robot_git_token}}
|
||||
RCSK
|
||||
CI_DB_URL<<CIDBU
|
||||
${{ secrets.ci_db_url }}
|
||||
CIDBU
|
||||
CI_DB_PASSWORD<<CIDBP
|
||||
${{ secrets.ci_db_password }}
|
||||
CIDBP
|
||||
EOF
|
||||
- name: Common setup
|
||||
uses: ./.github/actions/common_setup
|
||||
17  .github/workflows/reusable_test.yml  vendored
@ -40,8 +40,11 @@ name: Testing workflow
|
||||
type: string
|
||||
default: "$GITHUB_WORKSPACE/tests/ci"
|
||||
secrets:
|
||||
secret_envs:
|
||||
description: if given, it's passed to the environments
|
||||
robot_git_token:
|
||||
required: false
|
||||
ci_db_url:
|
||||
required: false
|
||||
ci_db_password:
|
||||
required: false
|
||||
|
||||
|
||||
@ -75,10 +78,18 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
CHECK_NAME=${{ inputs.test_name }}
|
||||
${{inputs.additional_envs}}
|
||||
${{secrets.secret_envs}}
|
||||
DOCKER_TAG<<DOCKER_JSON
|
||||
${{ toJson(fromJson(inputs.data).docker_data.images) }}
|
||||
DOCKER_JSON
|
||||
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
|
||||
${{secrets.robot_git_token}}
|
||||
RCSK
|
||||
CI_DB_URL<<CIDBU
|
||||
${{ secrets.ci_db_url }}
|
||||
CIDBU
|
||||
CI_DB_PASSWORD<<CIDBP
|
||||
${{ secrets.ci_db_password }}
|
||||
CIDBP
|
||||
EOF
|
||||
- name: Common setup
|
||||
uses: ./.github/actions/common_setup
|
||||
15  .github/workflows/reusable_test_stage.yml  vendored
@ -1,4 +1,8 @@
|
||||
env:
|
||||
PYTHONUNBUFFERED: 1
|
||||
|
||||
name: StageWF
|
||||
|
||||
'on':
|
||||
workflow_call:
|
||||
inputs:
|
||||
@ -11,8 +15,11 @@ name: StageWF
|
||||
type: string
|
||||
required: true
|
||||
secrets:
|
||||
secret_envs:
|
||||
description: if given, it's passed to the environments
|
||||
robot_git_token:
|
||||
required: false
|
||||
ci_db_url:
|
||||
required: false
|
||||
ci_db_password:
|
||||
required: false
|
||||
|
||||
jobs:
|
||||
@ -28,4 +35,6 @@ jobs:
|
||||
runner_type: ${{ matrix.job_name_and_runner_type.runner_type }}
|
||||
data: ${{ inputs.data }}
|
||||
secrets:
|
||||
secret_envs: ${{ secrets.secret_envs }}
|
||||
robot_git_token: ${{ secrets.robot_git_token }}
|
||||
ci_db_url: ${{ secrets.ci_db_url }}
|
||||
ci_db_password: ${{ secrets.ci_db_password }}
|
||||
1  .gitignore  vendored
@@ -159,6 +159,7 @@ website/package-lock.json
/programs/server/store
/programs/server/uuid
/programs/server/coordination
/programs/server/workload

# temporary test files
tests/queries/0_stateless/test_*
28  .gitmodules  vendored
@ -108,7 +108,7 @@
|
||||
url = https://github.com/ClickHouse/icudata
|
||||
[submodule "contrib/icu"]
|
||||
path = contrib/icu
|
||||
url = https://github.com/unicode-org/icu
|
||||
url = https://github.com/ClickHouse/icu
|
||||
[submodule "contrib/flatbuffers"]
|
||||
path = contrib/flatbuffers
|
||||
url = https://github.com/ClickHouse/flatbuffers
|
||||
@ -170,9 +170,6 @@
|
||||
[submodule "contrib/fast_float"]
|
||||
path = contrib/fast_float
|
||||
url = https://github.com/fastfloat/fast_float
|
||||
[submodule "contrib/libpq"]
|
||||
path = contrib/libpq
|
||||
url = https://github.com/ClickHouse/libpq
|
||||
[submodule "contrib/NuRaft"]
|
||||
path = contrib/NuRaft
|
||||
url = https://github.com/ClickHouse/NuRaft
|
||||
@ -230,15 +227,6 @@
|
||||
[submodule "contrib/minizip-ng"]
|
||||
path = contrib/minizip-ng
|
||||
url = https://github.com/zlib-ng/minizip-ng
|
||||
[submodule "contrib/annoy"]
|
||||
path = contrib/annoy
|
||||
url = https://github.com/ClickHouse/annoy
|
||||
[submodule "contrib/qpl"]
|
||||
path = contrib/qpl
|
||||
url = https://github.com/intel/qpl
|
||||
[submodule "contrib/idxd-config"]
|
||||
path = contrib/idxd-config
|
||||
url = https://github.com/intel/idxd-config
|
||||
[submodule "contrib/QAT-ZSTD-Plugin"]
|
||||
path = contrib/QAT-ZSTD-Plugin
|
||||
url = https://github.com/intel/QAT-ZSTD-Plugin
|
||||
@ -344,13 +332,10 @@
|
||||
url = https://github.com/ClickHouse/usearch.git
|
||||
[submodule "contrib/SimSIMD"]
|
||||
path = contrib/SimSIMD
|
||||
url = https://github.com/ashvardanian/SimSIMD.git
|
||||
url = https://github.com/ClickHouse/SimSIMD.git
|
||||
[submodule "contrib/FP16"]
|
||||
path = contrib/FP16
|
||||
url = https://github.com/Maratyszcza/FP16.git
|
||||
[submodule "contrib/robin-map"]
|
||||
path = contrib/robin-map
|
||||
url = https://github.com/Tessil/robin-map.git
|
||||
[submodule "contrib/aklomp-base64"]
|
||||
path = contrib/aklomp-base64
|
||||
url = https://github.com/aklomp/base64.git
|
||||
@ -372,6 +357,15 @@
|
||||
[submodule "contrib/double-conversion"]
|
||||
path = contrib/double-conversion
|
||||
url = https://github.com/ClickHouse/double-conversion.git
|
||||
[submodule "contrib/mongo-cxx-driver"]
|
||||
path = contrib/mongo-cxx-driver
|
||||
url = https://github.com/ClickHouse/mongo-cxx-driver.git
|
||||
[submodule "contrib/mongo-c-driver"]
|
||||
path = contrib/mongo-c-driver
|
||||
url = https://github.com/ClickHouse/mongo-c-driver.git
|
||||
[submodule "contrib/numactl"]
|
||||
path = contrib/numactl
|
||||
url = https://github.com/ClickHouse/numactl.git
|
||||
[submodule "contrib/postgres"]
|
||||
path = contrib/postgres
|
||||
url = https://github.com/ClickHouse/postgres.git
|
||||
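Several submodules in the diff above (for example `contrib/icu` and `contrib/SimSIMD`) are repointed from their upstream URLs to forks under the ClickHouse organization. In a local checkout, a `.gitmodules` change like this only takes effect after the submodule config is re-synced; a hedged sketch of the usual sequence:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Point the submodule at the fork (this edits .gitmodules, as in the diff above).
git submodule set-url contrib/icu https://github.com/ClickHouse/icu

# Propagate the new URL from .gitmodules into .git/config and re-fetch.
git submodule sync -- contrib/icu
git submodule update --init --recursive -- contrib/icu
```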
471  CHANGELOG.md
@@ -1,4 +1,7 @@
### Table of Contents
**[ClickHouse release v24.10, 2024-10-31](#2410)**<br/>
**[ClickHouse release v24.9, 2024-09-26](#249)**<br/>
**[ClickHouse release v24.8 LTS, 2024-08-20](#248)**<br/>
**[ClickHouse release v24.7, 2024-07-30](#247)**<br/>
**[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
**[ClickHouse release v24.5, 2024-05-30](#245)**<br/>
@@ -10,6 +13,472 @@

# 2024 Changelog

### <a id="2410"></a> ClickHouse release 24.10, 2024-10-31

#### Backward Incompatible Change
* Allow to write `SETTINGS` before `FORMAT` in a chain of queries with `UNION` when subqueries are inside parentheses. This closes [#39712](https://github.com/ClickHouse/ClickHouse/issues/39712). Change the behavior when a query has the SETTINGS clause specified twice in a sequence. The closest SETTINGS clause will have a preference for the corresponding subquery. In the previous versions, the outermost SETTINGS clause could take a preference over the inner one. [#68614](https://github.com/ClickHouse/ClickHouse/pull/68614) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Reordering of filter conditions from `[PRE]WHERE` clause is now allowed by default. It could be disabled by setting `allow_reorder_prewhere_conditions` to `false`. [#70657](https://github.com/ClickHouse/ClickHouse/pull/70657) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Remove the `idxd-config` library, which has an incompatible license. This also removes the experimental Intel DeflateQPL codec. [#70987](https://github.com/ClickHouse/ClickHouse/pull/70987) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
||||
#### New Feature
|
||||
* Allow granting access to wildcard prefixes: `GRANT SELECT ON db.table_prefix_* TO user`. [#65311](https://github.com/ClickHouse/ClickHouse/pull/65311) ([pufit](https://github.com/pufit)).
|
||||
* If you press space bar during query runtime, the client will display a real-time table with detailed metrics. You can enable it globally with the new `--progress-table` option in clickhouse-client; a new `--enable-progress-table-toggle` is associated with the `--progress-table` option, and toggles the rendering of the progress table by pressing the control key (Space). [#63689](https://github.com/ClickHouse/ClickHouse/pull/63689) ([Maria Khristenko](https://github.com/mariaKhr)), [#70423](https://github.com/ClickHouse/ClickHouse/pull/70423) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||
* Allow to cache read files for object storage table engines and data lakes using hash from ETag + file path as cache key. [#70135](https://github.com/ClickHouse/ClickHouse/pull/70135) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Support creating a table with a query: `CREATE TABLE ... CLONE AS ...`. It clones the source table's schema and then attaches all partitions to the newly created table. This feature is only supported with tables of the `MergeTree` family. Closes [#65015](https://github.com/ClickHouse/ClickHouse/issues/65015). [#69091](https://github.com/ClickHouse/ClickHouse/pull/69091) ([tuanpach](https://github.com/tuanpach)).
|
||||
* Add a new system table, `system.query_metric_log` which contains history of memory and metric values from table system.events for individual queries, periodically flushed to disk. [#66532](https://github.com/ClickHouse/ClickHouse/pull/66532) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||
* A simple SELECT query can be written with implicit SELECT to enable calculator-style expressions, e.g., `ch "1 + 2"`. This is controlled by a new setting, `implicit_select`. [#68502](https://github.com/ClickHouse/ClickHouse/pull/68502) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Support the `--copy` mode for clickhouse local as a shortcut for format conversion [#68503](https://github.com/ClickHouse/ClickHouse/issues/68503). [#68583](https://github.com/ClickHouse/ClickHouse/pull/68583) ([Denis Hananein](https://github.com/denis-hananein)).
|
||||
* Add a builtin HTML page for visualizing merges which is available at the `/merges` path. [#70821](https://github.com/ClickHouse/ClickHouse/pull/70821) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add support for `arrayUnion` function. [#68989](https://github.com/ClickHouse/ClickHouse/pull/68989) ([Peter Nguyen](https://github.com/petern48)).
|
||||
* Allow parametrised SQL aliases. [#50665](https://github.com/ClickHouse/ClickHouse/pull/50665) ([Anton Kozlov](https://github.com/tonickkozlov)).
|
||||
* A new aggregate function `quantileExactWeightedInterpolated`, which is an interpolated version based on quantileExactWeighted. Some people may wonder why we need a new `quantileExactWeightedInterpolated` since we already have `quantileExactInterpolatedWeighted`; the reason is that the new one is more accurate than the old one. This is for Spark compatibility. [#69619](https://github.com/ClickHouse/ClickHouse/pull/69619) ([李扬](https://github.com/taiyang-li)).
|
||||
* A new function `arrayElementOrNull`. It returns `NULL` if the array index is out of range or a Map key not found. [#69646](https://github.com/ClickHouse/ClickHouse/pull/69646) ([李扬](https://github.com/taiyang-li)).
|
||||
* Allows users to specify regular expressions through new `message_regexp` and `message_regexp_negative` fields in the `config.xml` file to filter out logging. The logging is applied to the formatted un-colored text for the most intuitive developer experience. [#69657](https://github.com/ClickHouse/ClickHouse/pull/69657) ([Peter Nguyen](https://github.com/petern48)).
|
||||
* Added `RIPEMD160` function, which computes the RIPEMD-160 cryptographic hash of a string. Example: `SELECT HEX(RIPEMD160('The quick brown fox jumps over the lazy dog'))` returns `37F332F68DB77BD9D7EDD4969571AD671CF9DD3B`. [#70087](https://github.com/ClickHouse/ClickHouse/pull/70087) ([Dergousov Maxim](https://github.com/m7kss1)).
|
||||
* Support reading `Iceberg` tables on `HDFS`. [#70268](https://github.com/ClickHouse/ClickHouse/pull/70268) ([flynn](https://github.com/ucasfl)).
|
||||
* Support for CTE in the form of `WITH ... INSERT`, as previously we only supported `INSERT ... WITH ...`. [#70593](https://github.com/ClickHouse/ClickHouse/pull/70593) ([Shichao Jin](https://github.com/jsc0218)).
|
||||
* MongoDB integration: support for all MongoDB types, support for WHERE and ORDER BY statements on the MongoDB side, restriction for expressions unsupported by MongoDB. Note that the new integration is disabled by default; to use it, please set `<use_legacy_mongodb_integration>` to `false` in the server config. [#63279](https://github.com/ClickHouse/ClickHouse/pull/63279) ([Kirill Nikiforov](https://github.com/allmazz)).
|
||||
* A new function `getSettingOrDefault` was added to return a default value and avoid an exception if a custom setting is not found in the current profile (a brief usage sketch follows this entry). [#69917](https://github.com/ClickHouse/ClickHouse/pull/69917) ([Shankar](https://github.com/shiyer7474)).
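A minimal usage sketch for `getSettingOrDefault`; the custom setting name and fallback value below are illustrative, not taken from the pull request:

```bash
# Returns 42 when the custom setting is not defined in the current profile,
# instead of throwing an exception as getSetting would.
clickhouse-client --query "SELECT getSettingOrDefault('custom_x', 42)"
```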
|
||||
|
||||
#### Experimental feature
|
||||
* Refreshable materialized views are production ready. [#70550](https://github.com/ClickHouse/ClickHouse/pull/70550) ([Michael Kolupaev](https://github.com/al13n321)). Refreshable materialized views are now supported in Replicated databases. [#60669](https://github.com/ClickHouse/ClickHouse/pull/60669) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Parallel replicas are moved from experimental to beta. Reworked settings that control the behavior of parallel replicas algorithms. A quick recap: ClickHouse has four different algorithms for parallel reading involving multiple replicas, which is reflected in the setting `parallel_replicas_mode`, the default value for it is `read_tasks` Additionally, the toggle-switch setting `enable_parallel_replicas` has been added. [#63151](https://github.com/ClickHouse/ClickHouse/pull/63151) ([Alexey Milovidov](https://github.com/alexey-milovidov)), ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Support for the `Dynamic` type in most functions by executing them on internal types inside `Dynamic`. [#69691](https://github.com/ClickHouse/ClickHouse/pull/69691) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||
* Allow to read/write the `JSON` type as a binary string in `RowBinary` format under settings `input_format_binary_read_json_as_string/output_format_binary_write_json_as_string`. [#70288](https://github.com/ClickHouse/ClickHouse/pull/70288) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||
* Allow to serialize/deserialize `JSON` column as single String column in the Native format. For output use setting `output_format_native_write_json_as_string`. For input, use serialization version `1` before the column data. [#70312](https://github.com/ClickHouse/ClickHouse/pull/70312) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||
* Introduced a special (experimental) mode of a merge selector for MergeTree tables which makes it more aggressive for the partitions that are close to the limit by the number of parts. It is controlled by the `merge_selector_use_blurry_base` MergeTree-level setting. [#70645](https://github.com/ClickHouse/ClickHouse/pull/70645) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Implement generic ser/de between Avro's `Union` and ClickHouse's `Variant` types. Resolves [#69713](https://github.com/ClickHouse/ClickHouse/issues/69713). [#69712](https://github.com/ClickHouse/ClickHouse/pull/69712) ([Jiří Kozlovský](https://github.com/jirislav)).
|
||||
|
||||
#### Performance Improvement
|
||||
* Refactor `IDisk` and `IObjectStorage` for better performance. Tables from `plain` and `plain_rewritable` object storages will initialize faster. [#68146](https://github.com/ClickHouse/ClickHouse/pull/68146) ([Alexey Milovidov](https://github.com/alexey-milovidov), [Julia Kartseva](https://github.com/jkartseva)). Do not call the LIST object storage API when determining if a file or directory exists on the plain rewritable disk, as it can be cost-inefficient. [#70852](https://github.com/ClickHouse/ClickHouse/pull/70852) ([Julia Kartseva](https://github.com/jkartseva)). Reduce the number of object storage HEAD API requests in the plain_rewritable disk. [#70915](https://github.com/ClickHouse/ClickHouse/pull/70915) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||
* Added an ability to parse data directly into sparse columns. [#69828](https://github.com/ClickHouse/ClickHouse/pull/69828) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Improved performance of parsing formats with high number of missed values (e.g. `JSONEachRow`). [#69875](https://github.com/ClickHouse/ClickHouse/pull/69875) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Supports parallel reading of parquet row groups and prefetching of row groups in single-threaded mode. [#69862](https://github.com/ClickHouse/ClickHouse/pull/69862) ([LiuNeng](https://github.com/liuneng1994)).
|
||||
* Support minmax index for `pointInPolygon`. [#62085](https://github.com/ClickHouse/ClickHouse/pull/62085) ([JackyWoo](https://github.com/JackyWoo)).
|
||||
* Use bloom filters when reading Parquet files. [#62966](https://github.com/ClickHouse/ClickHouse/pull/62966) ([Arthur Passos](https://github.com/arthurpassos)).
|
||||
* Lock-free parts rename to avoid INSERT affecting SELECT (due to the parts lock). Under normal circumstances with `fsync_part_directory`, QPS of SELECT with INSERT in parallel increased 2x; under heavy load the effect is even bigger. Note, this only includes `ReplicatedMergeTree` for now. [#64955](https://github.com/ClickHouse/ClickHouse/pull/64955) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Respect `ttl_only_drop_parts` on `materialize ttl`; only read necessary columns to recalculate TTL and drop parts by replacing them with an empty one. [#65488](https://github.com/ClickHouse/ClickHouse/pull/65488) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||
* Optimized thread creation in the ThreadPool to minimize lock contention. Thread creation is now performed outside of the critical section to avoid delays in job scheduling and thread management under high load conditions. This leads to a much more responsive ClickHouse under heavy concurrent load. [#68694](https://github.com/ClickHouse/ClickHouse/pull/68694) ([filimonov](https://github.com/filimonov)).
|
||||
* Enable reading `LowCardinality` string columns from `ORC`. [#69481](https://github.com/ClickHouse/ClickHouse/pull/69481) ([李扬](https://github.com/taiyang-li)).
|
||||
* Use `LowCardinality` for `ProfileEvents` in system logs such as `part_log`, `query_views_log`, `filesystem_cache_log`. [#70152](https://github.com/ClickHouse/ClickHouse/pull/70152) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Improve performance of `fromUnixTimestamp`/`toUnixTimestamp` functions. [#71042](https://github.com/ClickHouse/ClickHouse/pull/71042) ([kevinyhzou](https://github.com/KevinyhZou)).
|
||||
* Don't disable nonblocking read from page cache for the entire server when reading from a blocking I/O. This was leading to a poorer performance when a single filesystem (e.g., tmpfs) didn't support the `preadv2` syscall while others do. [#70299](https://github.com/ClickHouse/ClickHouse/pull/70299) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* `ALTER TABLE .. REPLACE PARTITION` doesn't wait anymore for mutations/merges that happen in other partitions. [#59138](https://github.com/ClickHouse/ClickHouse/pull/59138) ([Vasily Nemkov](https://github.com/Enmk)).
|
||||
* Don't do validation when synchronizing ACL from Keeper. It's validating during creation. It shouldn't matter that much, but there are installations with tens of thousands or even more user created, and the unnecessary hash validation can take a long time to finish during server startup (it synchronizes everything from keeper). [#70644](https://github.com/ClickHouse/ClickHouse/pull/70644) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
|
||||
#### Improvement
|
||||
* `CREATE TABLE AS` will copy `PRIMARY KEY`, `ORDER BY`, and similar clauses (of `MergeTree` tables). [#69739](https://github.com/ClickHouse/ClickHouse/pull/69739) ([sakulali](https://github.com/sakulali)).
|
||||
* Support 64-bit XID in Keeper. It can be enabled with the `use_xid_64` configuration value. [#69908](https://github.com/ClickHouse/ClickHouse/pull/69908) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Command-line arguments for Bool settings are set to true when no value is provided for the argument (e.g. `clickhouse-client --optimize_aggregation_in_order --query "SELECT 1"`). [#70459](https://github.com/ClickHouse/ClickHouse/pull/70459) ([davidtsuk](https://github.com/davidtsuk)).
|
||||
* Added user-level settings `min_free_disk_bytes_to_throw_insert` and `min_free_disk_ratio_to_throw_insert` to prevent insertions on disks that are almost full. [#69755](https://github.com/ClickHouse/ClickHouse/pull/69755) ([Marco Vilas Boas](https://github.com/marco-vb)).
|
||||
* Embedded documentation for settings will be strictly more detailed and complete than the documentation on the website. This is the first step before making the website documentation always auto-generated from the source code. This has long-standing implications: - it will be guaranteed to have every setting; - there is no chance of having default values obsolete; - we can generate this documentation for each ClickHouse version; - the documentation can be displayed by the server itself even without Internet access. Generate the docs on the website from the source code. [#70289](https://github.com/ClickHouse/ClickHouse/pull/70289) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Allow empty needle in the function `replace`, the same behavior with PostgreSQL. [#69918](https://github.com/ClickHouse/ClickHouse/pull/69918) ([zhanglistar](https://github.com/zhanglistar)).
|
||||
* Allow empty needle in functions `replaceRegexp*`. [#70053](https://github.com/ClickHouse/ClickHouse/pull/70053) ([zhanglistar](https://github.com/zhanglistar)).
|
||||
* Symbolic links for tables in the `data/database_name/` directory are created for the actual paths to the table's data, depending on the storage policy, instead of the `store/...` directory on the default disk. [#61777](https://github.com/ClickHouse/ClickHouse/pull/61777) ([Kirill](https://github.com/kirillgarbar)).
|
||||
* While parsing an `Enum` field from `JSON`, a string containing an integer will be interpreted as the corresponding `Enum` element. This closes [#65119](https://github.com/ClickHouse/ClickHouse/issues/65119). [#66801](https://github.com/ClickHouse/ClickHouse/pull/66801) ([scanhex12](https://github.com/scanhex12)).
|
||||
* Allow `TRIM` -ing `LEADING` or `TRAILING` empty string as a no-op. Closes [#67792](https://github.com/ClickHouse/ClickHouse/issues/67792). [#68455](https://github.com/ClickHouse/ClickHouse/pull/68455) ([Peter Nguyen](https://github.com/petern48)).
|
||||
* Improve compatibility of `cast(timestamp as String)` with Spark. [#69179](https://github.com/ClickHouse/ClickHouse/pull/69179) ([Wenzheng Liu](https://github.com/lwz9103)).
|
||||
* Always use the new analyzer to calculate constant expressions when `enable_analyzer` is set to `true`. Support calculation of `executable` table function arguments without using `SELECT` query for constant expressions. [#69292](https://github.com/ClickHouse/ClickHouse/pull/69292) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Add a setting `enable_secure_identifiers` to disallow identifiers with special characters. [#69411](https://github.com/ClickHouse/ClickHouse/pull/69411) ([tuanpach](https://github.com/tuanpach)).
|
||||
* Add `show_create_query_identifier_quoting_rule` to define identifier quoting behavior in the `SHOW CREATE TABLE` query result. Possible values: - `user_display`: When the identifier is a keyword. - `when_necessary`: When the identifier is one of `{"distinct", "all", "table"}` and when it could lead to ambiguity: column names, dictionary attribute names. - `always`: Always quote identifiers. [#69448](https://github.com/ClickHouse/ClickHouse/pull/69448) ([tuanpach](https://github.com/tuanpach)).
|
||||
* Improve restoring of access entities' dependencies [#69563](https://github.com/ClickHouse/ClickHouse/pull/69563) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* If you run `clickhouse-client` or other CLI application, and it starts up slowly due to an overloaded server, and you start typing your query, such as `SELECT`, the previous versions will display the remaining of the terminal echo contents before printing the greetings message, such as `SELECTClickHouse local version 24.10.1.1.` instead of `ClickHouse local version 24.10.1.1.`. Now it is fixed. This closes [#31696](https://github.com/ClickHouse/ClickHouse/issues/31696). [#69856](https://github.com/ClickHouse/ClickHouse/pull/69856) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add new column `readonly_duration` to the `system.replicas` table. Needed to be able to distinguish actual readonly replicas from sentinel ones in alerts. [#69871](https://github.com/ClickHouse/ClickHouse/pull/69871) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
|
||||
* Change the type of `join_output_by_rowlist_perkey_rows_threshold` setting type to unsigned integer. [#69886](https://github.com/ClickHouse/ClickHouse/pull/69886) ([kevinyhzou](https://github.com/KevinyhZou)).
|
||||
* Enhance OpenTelemetry span logging to include query settings. [#70011](https://github.com/ClickHouse/ClickHouse/pull/70011) ([sharathks118](https://github.com/sharathks118)).
|
||||
* Add diagnostic info about higher-order array functions if lambda result type is unexpected. [#70093](https://github.com/ClickHouse/ClickHouse/pull/70093) ([ttanay](https://github.com/ttanay)).
|
||||
* Keeper improvement: less locking during cluster changes. [#70275](https://github.com/ClickHouse/ClickHouse/pull/70275) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Add `WITH IMPLICIT` and `FINAL` keywords to the `SHOW GRANTS` command. Fix a minor bug with implicit grants: [#70094](https://github.com/ClickHouse/ClickHouse/issues/70094). [#70293](https://github.com/ClickHouse/ClickHouse/pull/70293) ([pufit](https://github.com/pufit)).
|
||||
* Respect `compatibility` for MergeTree settings. The `compatibility` value is taken from the `default` profile on server startup, and default MergeTree settings are changed accordingly. Further changes of the `compatibility` setting do not affect MergeTree settings. [#70322](https://github.com/ClickHouse/ClickHouse/pull/70322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Avoid spamming the logs with large HTTP response bodies in case of errors during inter-server communication. [#70487](https://github.com/ClickHouse/ClickHouse/pull/70487) ([Vladimir Cherkasov](https://github.com/vdimir)).
|
||||
* Added a new setting `max_parts_to_move` to control the maximum number of parts that can be moved at once. [#70520](https://github.com/ClickHouse/ClickHouse/pull/70520) ([Vladimir Cherkasov](https://github.com/vdimir)).
|
||||
* Limit the frequency of certain log messages. [#70601](https://github.com/ClickHouse/ClickHouse/pull/70601) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* `CHECK TABLE` with `PART` qualifier was incorrectly formatted in the client. [#70660](https://github.com/ClickHouse/ClickHouse/pull/70660) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Support writing the column index and the offset index using parquet native writer. [#70669](https://github.com/ClickHouse/ClickHouse/pull/70669) ([LiuNeng](https://github.com/liuneng1994)).
|
||||
* Support parsing `DateTime64` for microsecond and timezone in joda syntax ("joda" is a popular Java library for date and time, and the "joda syntax" is that library's style). [#70737](https://github.com/ClickHouse/ClickHouse/pull/70737) ([kevinyhzou](https://github.com/KevinyhZou)).
|
||||
* Changed an approach to figure out if a cloud storage supports [batch delete](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html) or not. [#70786](https://github.com/ClickHouse/ClickHouse/pull/70786) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Support for Parquet page v2 in the native reader. [#70807](https://github.com/ClickHouse/ClickHouse/pull/70807) ([Arthur Passos](https://github.com/arthurpassos)).
|
||||
* A check if table has both `storage_policy` and `disk` set. A check if a new storage policy is compatible with an old one when using `disk` setting is added. [#70839](https://github.com/ClickHouse/ClickHouse/pull/70839) ([Kirill](https://github.com/kirillgarbar)).
|
||||
* Add `system.s3_queue_settings` and `system.azure_queue_settings`. [#70841](https://github.com/ClickHouse/ClickHouse/pull/70841) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Functions `base58Encode` and `base58Decode` now accept arguments of type `FixedString`. Example: `SELECT base58Encode(toFixedString('plaintext', 9));`. [#70846](https://github.com/ClickHouse/ClickHouse/pull/70846) ([Faizan Patel](https://github.com/faizan2786)).
|
||||
* Add the `partition` column to every entry type of the part log. Previously, it was set only for some entries. This closes [#70819](https://github.com/ClickHouse/ClickHouse/issues/70819). [#70848](https://github.com/ClickHouse/ClickHouse/pull/70848) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add `MergeStart` and `MutateStart` events into `system.part_log` which helps with merges analysis and visualization. [#70850](https://github.com/ClickHouse/ClickHouse/pull/70850) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add a profile event about the number of merged source parts. It allows the monitoring of the fanout of the merge tree in production. [#70908](https://github.com/ClickHouse/ClickHouse/pull/70908) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Background downloads to the filesystem cache were enabled back. [#70929](https://github.com/ClickHouse/ClickHouse/pull/70929) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Add a new merge selector algorithm, named `Trivial`, for professional usage only. It is worse than the `Simple` merge selector. [#70969](https://github.com/ClickHouse/ClickHouse/pull/70969) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Support for atomic `CREATE OR REPLACE VIEW`. [#70536](https://github.com/ClickHouse/ClickHouse/pull/70536) ([tuanpach](https://github.com/tuanpach))
|
||||
* Added `strict_once` mode to aggregate function `windowFunnel` to avoid counting one event several times in case it matches multiple conditions, close [#21835](https://github.com/ClickHouse/ClickHouse/issues/21835). [#69738](https://github.com/ClickHouse/ClickHouse/pull/69738) ([Vladimir Cherkasov](https://github.com/vdimir)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
* Apply configuration updates in global context object. It fixes issues like [#62308](https://github.com/ClickHouse/ClickHouse/issues/62308). [#62944](https://github.com/ClickHouse/ClickHouse/pull/62944) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fix `ReadSettings` not using user set values, because defaults were only used. [#65625](https://github.com/ClickHouse/ClickHouse/pull/65625) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix type mismatch issue in `sumMapFiltered` when using signed arguments. [#58408](https://github.com/ClickHouse/ClickHouse/pull/58408) ([Chen768959](https://github.com/Chen768959)).
* Fix toHour-like conversion functions' monotonicity when optional time zone argument is passed. [#60264](https://github.com/ClickHouse/ClickHouse/pull/60264) ([Amos Bird](https://github.com/amosbird)).
* Relax `supportsPrewhere` check for `Merge` tables. This fixes [#61064](https://github.com/ClickHouse/ClickHouse/issues/61064). It was hardened unnecessarily in [#60082](https://github.com/ClickHouse/ClickHouse/issues/60082). [#61091](https://github.com/ClickHouse/ClickHouse/pull/61091) ([Amos Bird](https://github.com/amosbird)).
* Fix `use_concurrency_control` setting handling for proper `concurrent_threads_soft_limit_num` limit enforcing. This enables concurrency control by default because previously it was broken. [#61473](https://github.com/ClickHouse/ClickHouse/pull/61473) ([Sergei Trifonov](https://github.com/serxa)).
* Fix incorrect `JOIN ON` section optimization in case of `IS NULL` check under any other function (like `NOT`) that may lead to wrong results. Closes [#67915](https://github.com/ClickHouse/ClickHouse/issues/67915). [#68049](https://github.com/ClickHouse/ClickHouse/pull/68049) ([Vladimir Cherkasov](https://github.com/vdimir)).
* Prevent `ALTER` queries that would make the `CREATE` query of tables invalid. [#68574](https://github.com/ClickHouse/ClickHouse/pull/68574) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix inconsistent AST formatting for `negate` (`-`) and `NOT` functions with tuples and arrays. [#68600](https://github.com/ClickHouse/ClickHouse/pull/68600) ([Vladimir Cherkasov](https://github.com/vdimir)).
* Fix insertion of incomplete type into `Dynamic` during deserialization. It could lead to `Parameter out of bound` errors. [#69291](https://github.com/ClickHouse/ClickHouse/pull/69291) ([Pavel Kruglov](https://github.com/Avogar)).
* Zero-copy replication, which is experimental and should not be used in production: fix an infinite loop after `RESTORE REPLICA` in the replicated merge tree with zero copy. [#69293](https://github.com/ClickHouse/ClickHouse/pull/69293) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Return back default value of `processing_threads_num` as number of cpu cores in storage `S3Queue`. [#69384](https://github.com/ClickHouse/ClickHouse/pull/69384) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Bypass try/catch flow when de/serializing nested repeated protobuf to nested columns (fixes [#41971](https://github.com/ClickHouse/ClickHouse/issues/41971)). [#69556](https://github.com/ClickHouse/ClickHouse/pull/69556) ([Eliot Hautefeuille](https://github.com/hileef)).
* Fix crash during insertion into FixedString column in PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)).
* Fix crash when executing `create view t as (with recursive 42 as ttt select ttt);`. [#69676](https://github.com/ClickHouse/ClickHouse/pull/69676) ([Han Fei](https://github.com/hanfei1991)).
* Fixed `maxMapState` throwing 'Bad get' if value type is DateTime64. [#69787](https://github.com/ClickHouse/ClickHouse/pull/69787) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix `getSubcolumn` with `LowCardinality` columns by overriding `useDefaultImplementationForLowCardinalityColumns` to return `true`. [#69831](https://github.com/ClickHouse/ClickHouse/pull/69831) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Fix permanently blocked distributed sends if a DROP of a distributed table failed. [#69843](https://github.com/ClickHouse/ClickHouse/pull/69843) ([Azat Khuzhin](https://github.com/azat)).
* Fix non-cancellable queries containing WITH FILL with NaN keys. This closes [#69261](https://github.com/ClickHouse/ClickHouse/issues/69261). [#69845](https://github.com/ClickHouse/ClickHouse/pull/69845) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix analyzer default with old compatibility value. [#69895](https://github.com/ClickHouse/ClickHouse/pull/69895) ([Raúl Marín](https://github.com/Algunenano)).
* Don't check dependencies during CREATE OR REPLACE VIEW during DROP of old table. Previously CREATE OR REPLACE query failed when there are dependent tables of the recreated view. [#69907](https://github.com/ClickHouse/ClickHouse/pull/69907) ([Pavel Kruglov](https://github.com/Avogar)).
* Something for Decimal. Fixes [#69730](https://github.com/ClickHouse/ClickHouse/issues/69730). [#69978](https://github.com/ClickHouse/ClickHouse/pull/69978) ([Arthur Passos](https://github.com/arthurpassos)).
* Now DEFINER/INVOKER will work with parameterized views. [#69984](https://github.com/ClickHouse/ClickHouse/pull/69984) ([pufit](https://github.com/pufit)).
* Fix parsing for view's definers. [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)).
* Fixed a bug when the timezone could change the result of the query with a `Date` or `Date32` arguments. [#70036](https://github.com/ClickHouse/ClickHouse/pull/70036) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fixes `Block structure mismatch` for queries with nested views and `WHERE` condition. Fixes [#66209](https://github.com/ClickHouse/ClickHouse/issues/66209). [#70054](https://github.com/ClickHouse/ClickHouse/pull/70054) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Avoid reusing columns among different named tuples when evaluating `tuple` functions. This fixes [#70022](https://github.com/ClickHouse/ClickHouse/issues/70022). [#70103](https://github.com/ClickHouse/ClickHouse/pull/70103) ([Amos Bird](https://github.com/amosbird)).
* Fix wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)).
* Check for Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent tables with such data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)).
* Proper error message for illegal query `JOIN ... ON *` , close [#68650](https://github.com/ClickHouse/ClickHouse/issues/68650). [#70124](https://github.com/ClickHouse/ClickHouse/pull/70124) ([Vladimir Cherkasov](https://github.com/vdimir)).
* Fix wrong result with skipping index. [#70127](https://github.com/ClickHouse/ClickHouse/pull/70127) ([Raúl Marín](https://github.com/Algunenano)).
* Fix data race in ColumnObject/ColumnTuple decompress method that could lead to heap use after free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)).
* Fix a possible hang in ALTER COLUMN with the Dynamic type. [#70144](https://github.com/ClickHouse/ClickHouse/pull/70144) ([Pavel Kruglov](https://github.com/Avogar)).
* Now ClickHouse will consider more errors as retriable and will not mark data parts as broken in case of such errors. [#70145](https://github.com/ClickHouse/ClickHouse/pull/70145) ([alesapin](https://github.com/alesapin)).
* Use correct `max_types` parameter during Dynamic type creation for JSON subcolumn. [#70147](https://github.com/ClickHouse/ClickHouse/pull/70147) ([Pavel Kruglov](https://github.com/Avogar)).
* Fix the password being displayed in `system.query_log` for users with bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix event counter for the native interface (InterfaceNativeSendBytes). [#70153](https://github.com/ClickHouse/ClickHouse/pull/70153) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix possible crash related to JSON columns. [#70172](https://github.com/ClickHouse/ClickHouse/pull/70172) ([Pavel Kruglov](https://github.com/Avogar)).
* Fix multiple issues with arrayMin and arrayMax. [#70207](https://github.com/ClickHouse/ClickHouse/pull/70207) ([Raúl Marín](https://github.com/Algunenano)).
* Respect setting allow_simdjson in the JSON type parser. [#70218](https://github.com/ClickHouse/ClickHouse/pull/70218) ([Pavel Kruglov](https://github.com/Avogar)).
* Fix a null pointer dereference on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Don't modify global settings with startup scripts. Previously, changing a setting in a startup script would change it globally. [#70310](https://github.com/ClickHouse/ClickHouse/pull/70310) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix ALTER of `Dynamic` type with reducing max_types parameter that could lead to server crash. [#70328](https://github.com/ClickHouse/ClickHouse/pull/70328) ([Pavel Kruglov](https://github.com/Avogar)).
* Fix crash when using WITH FILL incorrectly. [#70338](https://github.com/ClickHouse/ClickHouse/pull/70338) ([Raúl Marín](https://github.com/Algunenano)).
* Fix possible use-after-free in `SYSTEM DROP FORMAT SCHEMA CACHE FOR Protobuf`. [#70358](https://github.com/ClickHouse/ClickHouse/pull/70358) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash during GROUP BY JSON sub-object subcolumn. [#70374](https://github.com/ClickHouse/ClickHouse/pull/70374) ([Pavel Kruglov](https://github.com/Avogar)).
* Don't prefetch parts for vertical merges if part has no rows. [#70452](https://github.com/ClickHouse/ClickHouse/pull/70452) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix crash in WHERE with lambda functions. [#70464](https://github.com/ClickHouse/ClickHouse/pull/70464) ([Raúl Marín](https://github.com/Algunenano)).
* Fix table creation with `CREATE ... AS table_function(...)` with database `Replicated` and unavailable table function source on secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)).
* Fixed rare crashes in `SELECT`-s and merges after adding a column of `Array` type with non-empty default expression. [#70695](https://github.com/ClickHouse/ClickHouse/pull/70695) ([Anton Popov](https://github.com/CurtizJ)).
* Insert into table function s3 will respect query settings. [#70696](https://github.com/ClickHouse/ClickHouse/pull/70696) ([Vladimir Cherkasov](https://github.com/vdimir)).
* Fix infinite recursion when inferring a protobuf schema when skipping unsupported fields is enabled. [#70697](https://github.com/ClickHouse/ClickHouse/pull/70697) ([Raúl Marín](https://github.com/Algunenano)).
* Disable enable_named_columns_in_function_tuple by default. [#70833](https://github.com/ClickHouse/ClickHouse/pull/70833) ([Raúl Marín](https://github.com/Algunenano)).
* Fix S3Queue table engine setting processing_threads_num not being effective in case it was deduced from the number of cpu cores on the server. [#70837](https://github.com/ClickHouse/ClickHouse/pull/70837) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Normalize named tuple arguments in aggregation states. This fixes [#69732](https://github.com/ClickHouse/ClickHouse/issues/69732) . [#70853](https://github.com/ClickHouse/ClickHouse/pull/70853) ([Amos Bird](https://github.com/amosbird)).
* Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `limit by`, `limit with ties` for distributed and parallel replicas. [#70880](https://github.com/ClickHouse/ClickHouse/pull/70880) ([Nikita Taranov](https://github.com/nickitat)).

### <a id="249"></a> ClickHouse release 24.9, 2024-09-26

#### Backward Incompatible Change

* Expressions like `a[b].c` are supported for named tuples, as well as named subscripts from arbitrary expressions, e.g., `expr().name`. This is useful for processing JSON. This closes [#54965](https://github.com/ClickHouse/ClickHouse/issues/54965). In previous versions, an expression of form `expr().name` was parsed as `tupleElement(expr(), name)`, and the query analyzer was searching for a column `name` rather than for the corresponding tuple element; while in the new version, it is changed to `tupleElement(expr(), 'name')`. In most cases, the previous version was not working, but it is possible to imagine a very unusual scenario when this change could lead to incompatibility: if you stored names of tuple elements in a column or an alias, that was named differently than the tuple element's name: `SELECT 'b' AS a, CAST([tuple(123)] AS 'Array(Tuple(b UInt8))') AS t, t[1].a`. It is very unlikely that you used such queries, but we still have to mark this change as potentially backward incompatible. [#68435](https://github.com/ClickHouse/ClickHouse/pull/68435) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* When the setting `print_pretty_type_names` is enabled, it will print `Tuple` data type in a pretty form in `SHOW CREATE TABLE` statements, `formatQuery` function, and in the interactive mode in `clickhouse-client` and `clickhouse-local`. In previous versions, this setting was only applied to `DESCRIBE` queries and `toTypeName`. This closes [#65753](https://github.com/ClickHouse/ClickHouse/issues/65753). [#68492](https://github.com/ClickHouse/ClickHouse/pull/68492) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not allow explicitly specifying UUID when creating a table in `Replicated` databases. Also, do not allow explicitly specifying Keeper path and replica name for *MergeTree tables in Replicated databases. It introduces a new setting `database_replicated_allow_explicit_uuid` and changes the type of `database_replicated_allow_replicated_engine_arguments` from Bool to UInt64 [#66104](https://github.com/ClickHouse/ClickHouse/pull/66104) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### New Feature

* Allow a user to have multiple authentication methods instead of only one. Allow authentication methods to be reset to most recently added method. If you want to run instances on 24.8 and one on 24.9 for some time, it's better to set `max_authentication_methods_per_user` = 1 for that period to avoid potential incompatibilities. [#65277](https://github.com/ClickHouse/ClickHouse/pull/65277) ([Arthur Passos](https://github.com/arthurpassos)).
* Add support for `ATTACH PARTITION ALL FROM`. [#61987](https://github.com/ClickHouse/ClickHouse/pull/61987) ([Kirill Nikiforov](https://github.com/allmazz)).
* Add the `input_format_json_empty_as_default` setting which, when enabled, treats empty fields in JSON inputs as default values. Closes [#59339](https://github.com/ClickHouse/ClickHouse/issues/59339). [#66782](https://github.com/ClickHouse/ClickHouse/pull/66782) ([Alexis Arnaud](https://github.com/a-a-f)).
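
A minimal sketch of the setting in action (the table name and default value are illustrative, and it is assumed that an empty string counts as an empty field here):

```sql
SET input_format_json_empty_as_default = 1;

CREATE TABLE json_defaults (x UInt32 DEFAULT 42) ENGINE = Memory;

-- with the setting enabled, the empty value is replaced by the column default (42)
INSERT INTO json_defaults FORMAT JSONEachRow {"x": ""}

SELECT * FROM json_defaults;
```
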
* Added functions `overlay` and `overlayUTF8` which replace parts of a string by another string. Example: `SELECT overlay('Hello New York', 'Jersey', 11)` returns `Hello New Jersey`. [#66933](https://github.com/ClickHouse/ClickHouse/pull/66933) ([李扬](https://github.com/taiyang-li)).
* Add support for lightweight deletes in partition `DELETE FROM [db.]table [ON CLUSTER cluster] [IN PARTITION partition_expr] WHERE expr; ` [#67805](https://github.com/ClickHouse/ClickHouse/pull/67805) ([sunny](https://github.com/sunny19930321)).
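
A small sketch of the partition-scoped lightweight delete (the table, partitioning scheme, and values are made up):

```sql
CREATE TABLE events (d Date, user_id UInt64) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY user_id;
INSERT INTO events VALUES ('2024-09-01', 42), ('2024-10-01', 42);

-- only rows in partition 202409 are affected
DELETE FROM events IN PARTITION 202409 WHERE user_id = 42;
```
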
* Implemented comparison for `Interval` data type values of different domains (such as seconds and minutes), so they are now converted to the least supertype. [#68057](https://github.com/ClickHouse/ClickHouse/pull/68057) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add `create_if_not_exists` setting to default to `IF NOT EXISTS` behavior during CREATE statements. [#68164](https://github.com/ClickHouse/ClickHouse/pull/68164) ([Peter Nguyen](https://github.com/petern48)).
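
A minimal sketch (without the setting, the second CREATE would normally fail because the table already exists):

```sql
SET create_if_not_exists = 1;

CREATE TABLE t (x UInt64) ENGINE = MergeTree ORDER BY x;
-- with the setting enabled this behaves like CREATE TABLE IF NOT EXISTS and is a no-op
CREATE TABLE t (x UInt64) ENGINE = MergeTree ORDER BY x;
```
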
* Makes it possible to read `Iceberg` tables in Azure and locally. [#68210](https://github.com/ClickHouse/ClickHouse/pull/68210) ([Daniil Ivanik](https://github.com/divanik)).
* Query cache entries can now be dropped by tag. For example, the query cache entry created by `SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'` can now be dropped by `SYSTEM DROP QUERY CACHE TAG 'abc'`. [#68477](https://github.com/ClickHouse/ClickHouse/pull/68477) ([Michał Tabaszewski](https://github.com/pinsvin00)).
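
For illustration, dropping only the tagged entry while keeping the rest of the query cache:

```sql
SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc';
SELECT 2 SETTINGS use_query_cache = true;

-- removes only the entry tagged 'abc'
SYSTEM DROP QUERY CACHE TAG 'abc';
```
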
* Add storage encryption for named collections. [#68615](https://github.com/ClickHouse/ClickHouse/pull/68615) ([Pablo Marcos](https://github.com/pamarcos)).
* Add virtual column `_headers` for the `URL` table engine. Closes [#65026](https://github.com/ClickHouse/ClickHouse/issues/65026). [#68867](https://github.com/ClickHouse/ClickHouse/pull/68867) ([flynn](https://github.com/ucasfl)).
* Add `system.projections` table to track available projections. [#68901](https://github.com/ClickHouse/ClickHouse/pull/68901) ([Jordi Villar](https://github.com/jrdi)).
* Add a new function `arrayZipUnaligned` for Spark compatibility (it is named `arrays_zip` in Spark), which allows unaligned arrays, based on the original `arrayZip`. [#69030](https://github.com/ClickHouse/ClickHouse/pull/69030) ([李扬](https://github.com/taiyang-li)).
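
A small sketch of the unaligned behaviour (assuming missing elements of the shorter array are filled with NULLs):

```sql
SELECT arrayZip([1, 2], ['a', 'b']) AS aligned,
       arrayZipUnaligned([1, 2, 3], ['a', 'b']) AS unaligned;
-- `unaligned` is expected to contain a third tuple with a NULL for the shorter array
```
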
* Added `cp`/`mv` commands for the keeper client command line application which atomically copies/moves node. [#69034](https://github.com/ClickHouse/ClickHouse/pull/69034) ([Mikhail Artemenko](https://github.com/Michicosun)).
* Adds argument `scale` (default: `true`) to function `arrayAUC` which allows to skip the normalization step (issue [#69609](https://github.com/ClickHouse/ClickHouse/issues/69609)). [#69717](https://github.com/ClickHouse/ClickHouse/pull/69717) ([gabrielmcg44](https://github.com/gabrielmcg44)).
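
A minimal sketch of the new argument (the scores and labels are toy values; `scale` is the third, optional argument described in this entry):

```sql
SELECT
    arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]) AS auc_scaled,         -- default: scale = true
    arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], false) AS auc_unscaled; -- skip the normalization step
```
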

#### Experimental feature

* Adds a setting `input_format_try_infer_variants` which allows `Variant` type to be inferred during schema inference for text formats when there is more than one possible type for column/array elements. [#63798](https://github.com/ClickHouse/ClickHouse/pull/63798) ([Shaun Struwig](https://github.com/Blargian)).
* Add aggregate functions `distinctDynamicTypes`/`distinctJSONPaths`/`distinctJSONPathsAndTypes` for better introspection of JSON column type content. [#68463](https://github.com/ClickHouse/ClickHouse/pull/68463) ([Kruglov Pavel](https://github.com/Avogar)).
* New algorithm to determine the unit of marks distribution between parallel replicas by a consistent hash. Different numbers of marks chosen for different read patterns to improve performance. [#68424](https://github.com/ClickHouse/ClickHouse/pull/68424) ([Nikita Taranov](https://github.com/nickitat)).
* Previously the algorithmic complexity of part deduplication logic in parallel replica announcement handling was O(n^2), which could take noticeable time for tables with many parts (or partitions). This change makes the complexity O(n*log(n)). [#69596](https://github.com/ClickHouse/ClickHouse/pull/69596) ([Alexander Gololobov](https://github.com/davenger)).
* Refreshable materialized view improvements: append mode (`... REFRESH EVERY 1 MINUTE APPEND ...`) to add rows to existing table instead of overwriting the whole table, retries (disabled by default, configured in SETTINGS section of the query), `SYSTEM WAIT VIEW <name>` query that waits for the currently running refresh, some fixes. [#58934](https://github.com/ClickHouse/ClickHouse/pull/58934) ([Michael Kolupaev](https://github.com/al13n321)).
* Added `min_max` as a new type of (experimental) statistics. It supports estimating range predicates over numeric columns, e.g. `x < 100`. [#67013](https://github.com/ClickHouse/ClickHouse/pull/67013) ([JackyWoo](https://github.com/JackyWoo)).
* Improve `castOrDefault` from Variant/Dynamic columns so it works when inner types are not convertible at all. [#67150](https://github.com/ClickHouse/ClickHouse/pull/67150) ([Kruglov Pavel](https://github.com/Avogar)).
* Replication of subset of columns is now available through MaterializedPostgreSQL. Closes [#33748](https://github.com/ClickHouse/ClickHouse/issues/33748). [#69092](https://github.com/ClickHouse/ClickHouse/pull/69092) ([Kruglov Kirill](https://github.com/1on)).

#### Performance Improvement

* Implemented reading of required files only for Hive partitioning. [#68963](https://github.com/ClickHouse/ClickHouse/pull/68963) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Improve the JOIN performance by rearranging the right table by keys while the table keys are dense in the LEFT or INNER hash joins. [#60341](https://github.com/ClickHouse/ClickHouse/pull/60341) ([kevinyhzou](https://github.com/KevinyhZou)).
* Improve ALL JOIN performance by appending the list of rows lazily. [#63677](https://github.com/ClickHouse/ClickHouse/pull/63677) ([kevinyhzou](https://github.com/KevinyhZou)).
* Load filesystem cache metadata asynchronously during the boot process, in order to make restarts faster (controlled by setting `load_metadata_asynchronously`). [#65736](https://github.com/ClickHouse/ClickHouse/pull/65736) ([Daniel Pozo Escalona](https://github.com/danipozo)).
* Functions `array` and `map` were optimized to process certain common cases much faster. [#67707](https://github.com/ClickHouse/ClickHouse/pull/67707) ([李扬](https://github.com/taiyang-li)).
* A trivial optimization of ORC string reading, especially when a column contains no NULLs. [#67794](https://github.com/ClickHouse/ClickHouse/pull/67794) ([李扬](https://github.com/taiyang-li)).
* Improved overall performance of merges by reducing the overhead of scheduling steps of merges. [#68016](https://github.com/ClickHouse/ClickHouse/pull/68016) ([Anton Popov](https://github.com/CurtizJ)).
* Speed up requests to S3 when a profile is not set, credentials are not set, and IMDS is not available (for example, when you are querying a public bucket on a machine outside of a cloud). This closes [#52771](https://github.com/ClickHouse/ClickHouse/issues/52771). [#68082](https://github.com/ClickHouse/ClickHouse/pull/68082) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Devirtualize format reader in `RowInputFormatWithNamesAndTypes` for some performance improvement. [#68437](https://github.com/ClickHouse/ClickHouse/pull/68437) ([李扬](https://github.com/taiyang-li)).
* Add the parallel merge for `uniq` aggregate function when aggregating with a group by key to maximize the CPU utilization. [#68441](https://github.com/ClickHouse/ClickHouse/pull/68441) ([Jiebin Sun](https://github.com/jiebinn)).
* Add settings `output_format_orc_dictionary_key_size_threshold` to allow user to enable dict encoding for string column in `ORC` output format. It helps reduce the output `ORC` file size and improve reading performance significantly. [#68591](https://github.com/ClickHouse/ClickHouse/pull/68591) ([李扬](https://github.com/taiyang-li)).
* Introduce a new Keeper request RemoveRecursive, which removes a node with all of its subtree. [#69332](https://github.com/ClickHouse/ClickHouse/pull/69332) ([Mikhail Artemenko](https://github.com/Michicosun)).
* Speedup insertion performance into a table with a vector similarity index by adding data to the vector index in parallel. [#69493](https://github.com/ClickHouse/ClickHouse/pull/69493) ([flynn](https://github.com/ucasfl)).
* Reduce memory usage of inserts to JSON by using an adaptive write buffer size. A lot of files created by a JSON column in a wide part contain a small amount of data, and it doesn't make sense to allocate a 1MB buffer for them. [#69272](https://github.com/ClickHouse/ClickHouse/pull/69272) ([Kruglov Pavel](https://github.com/Avogar)).
* Avoid returning a thread to the concurrent hash join threadpool, to avoid queries excessively spawning threads. [#69406](https://github.com/ClickHouse/ClickHouse/pull/69406) ([Duc Canh Le](https://github.com/canhld94)).

#### Improvement

* CREATE TABLE AS now copies PRIMARY KEY, ORDER BY, and similar clauses. Currently, this is supported only for the MergeTree family of table engines. [#69076](https://github.com/ClickHouse/ClickHouse/pull/69076) ([sakulali](https://github.com/sakulali)).
* Hardened parts of the codebase related to parsing of small entities. The following (minor) bugs were found and fixed: - if a `DeltaLake` table is partitioned by Bool, the partition value is always interpreted as false; - `ExternalDistributed` table was using only a single shard in the provided addresses; the value of `max_threads` setting and similar were printed as `'auto(N)'` instead of `auto(N)`. [#52503](https://github.com/ClickHouse/ClickHouse/pull/52503) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Use cgroup-specific metrics for CPU usage accounting instead of system-wide metrics. [#62003](https://github.com/ClickHouse/ClickHouse/pull/62003) ([Nikita Taranov](https://github.com/nickitat)).
* IO scheduling for remote S3 disks is now done on the level of HTTP socket streams (instead of the whole S3 requests) to resolve `bandwidth_limit` throttling issues. [#65182](https://github.com/ClickHouse/ClickHouse/pull/65182) ([Sergei Trifonov](https://github.com/serxa)).
* Functions `upperUTF8` and `lowerUTF8` were previously only able to uppercase / lowercase Cyrillic characters. This limitation is now removed and characters in arbitrary languages are uppercased/lowercased. Example: `SELECT upperUTF8('Süden')` now returns `SÜDEN`. [#65761](https://github.com/ClickHouse/ClickHouse/pull/65761) ([李扬](https://github.com/taiyang-li)).
* When a lightweight delete happens on a table with projection(s), users previously had the choice to either throw an exception (by default) or drop the projection. Now there is a third option: keep the lightweight delete and then rebuild the projection(s). [#66169](https://github.com/ClickHouse/ClickHouse/pull/66169) ([jsc0218](https://github.com/jsc0218)).
* Two options (`dns_allow_resolve_names_to_ipv4` and `dns_allow_resolve_names_to_ipv6`) have been added to allow blocking connections by IP family. [#66895](https://github.com/ClickHouse/ClickHouse/pull/66895) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Make Ctrl-Z ignorance configurable (ignore_shell_suspend) in clickhouse-client. [#67134](https://github.com/ClickHouse/ClickHouse/pull/67134) ([Azat Khuzhin](https://github.com/azat)).
* Improve UTF-8 validation in JSON output formats. Ensures that valid JSON is generated in the case of certain byte sequences in the result data. [#67938](https://github.com/ClickHouse/ClickHouse/pull/67938) ([mwoenker](https://github.com/mwoenker)).
* Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)).
* ODBC: get http_max_tries from the server configuration. [#68128](https://github.com/ClickHouse/ClickHouse/pull/68128) ([Rodolphe Dugé de Bernonville](https://github.com/RodolpheDuge)).
* Add wildcard support for user identification in X.509 SubjectAltName extension. [#68236](https://github.com/ClickHouse/ClickHouse/pull/68236) ([Marco Vilas Boas](https://github.com/marco-vb)).
* Improve schema inference of date times. Now `DateTime64` used only when date time has fractional part, otherwise regular DateTime is used. Inference of Date/DateTime is more strict now, especially when `date_time_input_format='best_effort'` to avoid inferring date times from strings in corner cases. [#68382](https://github.com/ClickHouse/ClickHouse/pull/68382) ([Kruglov Pavel](https://github.com/Avogar)).
* Delete old code of named collections from dictionaries and substitute it to the new, which allows to use DDL created named collections in dictionaries. Closes [#60936](https://github.com/ClickHouse/ClickHouse/issues/60936), closes [#36890](https://github.com/ClickHouse/ClickHouse/issues/36890). [#68412](https://github.com/ClickHouse/ClickHouse/pull/68412) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Use HTTP/1.1 instead of HTTP/1.0 (set by default) for external HTTP authenticators. [#68456](https://github.com/ClickHouse/ClickHouse/pull/68456) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Added a new set of metrics for thread pool introspection, providing deeper insights into thread pool performance and behavior. [#68674](https://github.com/ClickHouse/ClickHouse/pull/68674) ([filimonov](https://github.com/filimonov)).
* Support query parameters in async inserts with format `Values`. [#68741](https://github.com/ClickHouse/ClickHouse/pull/68741) ([Anton Popov](https://github.com/CurtizJ)).
* Support `Date32` on `dateTrunc` and `toStartOfInterval`. [#68874](https://github.com/ClickHouse/ClickHouse/pull/68874) ([LiuNeng](https://github.com/liuneng1994)).
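
For illustration, both functions now accept `Date32` values, including dates outside the regular `Date` range:

```sql
SELECT
    dateTrunc('month', toDate32('2200-05-15')) AS truncated,
    toStartOfInterval(toDate32('2200-05-15'), INTERVAL 1 MONTH) AS interval_start;
```
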
* Add `plan_step_name` and `plan_step_description` columns to `system.processors_profile_log`. [#68954](https://github.com/ClickHouse/ClickHouse/pull/68954) ([Alexander Gololobov](https://github.com/davenger)).
* Support for the Spanish language in the embedded dictionaries. [#69035](https://github.com/ClickHouse/ClickHouse/pull/69035) ([Vasily Okunev](https://github.com/VOkunev)).
* Add CPU arch to the short fault information message. [#69037](https://github.com/ClickHouse/ClickHouse/pull/69037) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Queries will fail faster if a new Keeper connection cannot be established during retries. [#69148](https://github.com/ClickHouse/ClickHouse/pull/69148) ([Raúl Marín](https://github.com/Algunenano)).
* Update Database Factory so it would be possible for user defined database engines to have arguments, settings and table overrides (similar to StorageFactory). [#69201](https://github.com/ClickHouse/ClickHouse/pull/69201) ([NikBarykin](https://github.com/NikBarykin)).
* Restore mode that replaces all external table engines and functions to the `Null` engine (`restore_replace_external_engines_to_null`, `restore_replace_external_table_functions_to_null` settings) was failing if table had SETTINGS. Now it removes settings from table definition in this case and allows to restore such tables. [#69253](https://github.com/ClickHouse/ClickHouse/pull/69253) ([Ilya Yatsishin](https://github.com/qoega)).
* CLICKHOUSE_PASSWORD is correctly escaped for XML in clickhouse image's entrypoint. [#69301](https://github.com/ClickHouse/ClickHouse/pull/69301) ([aohoyd](https://github.com/aohoyd)).
* Allow empty arguments for `arrayZip`/`arrayZipUnaligned`, as `concat` did in https://github.com/ClickHouse/ClickHouse/pull/65887. This is for Spark compatibility in the Gluten CH backend. [#69576](https://github.com/ClickHouse/ClickHouse/pull/69576) ([李扬](https://github.com/taiyang-li)).
* Support more advanced SSL options for Keeper's internal communication (e.g. private keys with passphrase). [#69582](https://github.com/ClickHouse/ClickHouse/pull/69582) ([Antonio Andelic](https://github.com/antonio2368)).
* Index analysis can take noticeable time for big tables with many parts or partitions. This change should enable killing a heavy query at that stage. [#69606](https://github.com/ClickHouse/ClickHouse/pull/69606) ([Alexander Gololobov](https://github.com/davenger)).
* Masking sensitive info in `gcs` table function. [#69611](https://github.com/ClickHouse/ClickHouse/pull/69611) ([Vitaly Baranov](https://github.com/vitlibar)).
* Rebuild projection for merges that reduce number of rows. [#62364](https://github.com/ClickHouse/ClickHouse/pull/62364) ([cangyin](https://github.com/cangyin)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix attaching table when pg dbname contains "-" in the experimental and unsupported MaterializedPostgreSQL engine. [#62730](https://github.com/ClickHouse/ClickHouse/pull/62730) ([takakawa](https://github.com/takakawa)).
* Fixed error on generated columns in the experimental and totally unsupported MaterializedPostgreSQL engine when adnum ordering is broken [#63161](https://github.com/ClickHouse/ClickHouse/issues/63161). Fixed error on id column with nextval expression as default in the experimental and totally unsupported MaterializedPostgreSQL when there are generated columns in table. Fixed error on dropping publication with symbols except \[a-z1-9-\]. [#67664](https://github.com/ClickHouse/ClickHouse/pull/67664) ([Kruglov Kirill](https://github.com/1on)).
* Storage Join to support Nullable columns in the left table, close [#61247](https://github.com/ClickHouse/ClickHouse/issues/61247). [#66926](https://github.com/ClickHouse/ClickHouse/pull/66926) ([vdimir](https://github.com/vdimir)).
* Incorrect query result with parallel replicas (distributed queries as well) when the `IN` operator contains a conversion to Decimal(). The bug was introduced with the new analyzer. [#67234](https://github.com/ClickHouse/ClickHouse/pull/67234) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix the problem that alter modify order by causes inconsistent metadata. [#67436](https://github.com/ClickHouse/ClickHouse/pull/67436) ([iceFireser](https://github.com/iceFireser)).
* Fix the upper bound of the function `fromModifiedJulianDay`. It was supposed to be `9999-12-31` but was mistakenly set to `9999-01-01`. [#67583](https://github.com/ClickHouse/ClickHouse/pull/67583) ([PHO](https://github.com/depressed-pho)).
* Fix when the index is not at the beginning of the tuple during `IN` query. [#67626](https://github.com/ClickHouse/ClickHouse/pull/67626) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix expiration in `RoleCache`. [#67748](https://github.com/ClickHouse/ClickHouse/pull/67748) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix window view missing blocks due to slow flush to view. [#67983](https://github.com/ClickHouse/ClickHouse/pull/67983) ([Raúl Marín](https://github.com/Algunenano)).
* Fix MSan issue caused by incorrect date format. [#68105](https://github.com/ClickHouse/ClickHouse/pull/68105) ([JackyWoo](https://github.com/JackyWoo)).
* Fixed crash in Parquet filtering when data types in the file substantially differ from requested types (e.g. `... FROM file('a.parquet', Parquet, 'x String')`, but the file has `x Int64`). Without this fix, use `input_format_parquet_filter_push_down = 0` as a workaround. [#68131](https://github.com/ClickHouse/ClickHouse/pull/68131) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix crash in `lag`/`lead` which is introduced in [#67091](https://github.com/ClickHouse/ClickHouse/issues/67091). [#68262](https://github.com/ClickHouse/ClickHouse/pull/68262) ([lgbo](https://github.com/lgbo-ustc)).
* Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* After https://github.com/ClickHouse/ClickHouse/pull/61984 `schema_inference_make_columns_nullable=0` still can make columns `Nullable` in Parquet/Arrow formats. The change was backward incompatible and users noticed the changes in the behaviour. This PR makes `schema_inference_make_columns_nullable=0` to work as before (no Nullable columns will be inferred) and introduces new value `auto` for this setting that will make columns `Nullable` only if data has information about nullability. [#68298](https://github.com/ClickHouse/ClickHouse/pull/68298) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixes [#50868](https://github.com/ClickHouse/ClickHouse/issues/50868). Small DateTime64 constant values returned by a nested subquery inside a distributed query were wrongly transformed to Nulls, thus causing errors and possible incorrect query results. [#68323](https://github.com/ClickHouse/ClickHouse/pull/68323) ([Shankar](https://github.com/shiyer7474)).
* Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Fix bug in key condition. [#68354](https://github.com/ClickHouse/ClickHouse/pull/68354) ([Han Fei](https://github.com/hanfei1991)).
* Fix crash on drop or rename a role that is used in LDAP external user directory. [#68355](https://github.com/ClickHouse/ClickHouse/pull/68355) ([Andrey Zvonov](https://github.com/zvonand)).
* Fix Progress column value of system.view_refreshes greater than 1 [#68377](https://github.com/ClickHouse/ClickHouse/issues/68377). [#68378](https://github.com/ClickHouse/ClickHouse/pull/68378) ([megao](https://github.com/jetgm)).
* Process regexp flags correctly. [#68389](https://github.com/ClickHouse/ClickHouse/pull/68389) ([Han Fei](https://github.com/hanfei1991)).
* PostgreSQL-style cast operator (`::`) works correctly even for SQL-style hex and binary string literals (e.g., `SELECT x'414243'::String`). This closes [#68324](https://github.com/ClickHouse/ClickHouse/issues/68324). [#68482](https://github.com/ClickHouse/ClickHouse/pull/68482) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Minor patch for https://github.com/ClickHouse/ClickHouse/pull/68131. [#68494](https://github.com/ClickHouse/ClickHouse/pull/68494) ([Chang chen](https://github.com/baibaichen)).
* Fix [#68239](https://github.com/ClickHouse/ClickHouse/issues/68239) SAMPLE n where n is an integer. [#68499](https://github.com/ClickHouse/ClickHouse/pull/68499) ([Denis Hananein](https://github.com/denis-hananein)).
* Fix a bug in `mannWhitneyUTest` when the sizes of the two distributions are not equal. [#68556](https://github.com/ClickHouse/ClickHouse/pull/68556) ([Han Fei](https://github.com/hanfei1991)).
* Fix a failure to start replication of ReplicatedMergeTree after an unexpected restart, caused by abnormal handling of a covered-by-broken part. [#68584](https://github.com/ClickHouse/ClickHouse/pull/68584) ([baolin](https://github.com/baolinhuang)).
* Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix the full-text index filtering out wrong columns when indexing multiple columns: row_id was not reset between different columns. The reproduction procedure is in `tests/queries/0_stateless/03228_full_text_with_multi_col.sql`. [#68644](https://github.com/ClickHouse/ClickHouse/pull/68644) ([siyuan](https://github.com/linkwk7)).
* Fix invalid character '\t' and '\n' in replica_name when creating a Replicated table, which causes incorrect parsing of 'source replica' in LogEntry. Mentioned in issue [#68640](https://github.com/ClickHouse/ClickHouse/issues/68640). [#68645](https://github.com/ClickHouse/ClickHouse/pull/68645) ([Zhigao Hong](https://github.com/zghong)).
* Added back virtual columns `_table` and `_database` to distributed tables. They were available until version 24.3. [#68672](https://github.com/ClickHouse/ClickHouse/pull/68672) ([Anton Popov](https://github.com/CurtizJ)).
* Fix possible error `Size of permutation (0) is less than required (...)` during Variant column permutation. [#68681](https://github.com/ClickHouse/ClickHouse/pull/68681) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible error `DB::Exception: Block structure mismatch in joined block stream: different columns:` with new JSON column. [#68686](https://github.com/ClickHouse/ClickHouse/pull/68686) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix issue with materialized constant keys when hashing maps with arrays as keys in functions `sipHash(64/128)Keyed`. [#68731](https://github.com/ClickHouse/ClickHouse/pull/68731) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Make `ColumnsDescription::toString` format each column using the same `IAST::FormatState object`. This results in uniform columns metadata being written to disk and ZooKeeper. [#68733](https://github.com/ClickHouse/ClickHouse/pull/68733) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Fix merging of aggregated data for grouping sets. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix logical error, when we create a replicated merge tree, alter a column and then execute modify statistics. [#68820](https://github.com/ClickHouse/ClickHouse/pull/68820) ([Han Fei](https://github.com/hanfei1991)).
* Fix resolving dynamic subcolumns from subqueries in analyzer. [#68824](https://github.com/ClickHouse/ClickHouse/pull/68824) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix complex types metadata parsing in DeltaLake. Closes [#68739](https://github.com/ClickHouse/ClickHouse/issues/68739). [#68836](https://github.com/ClickHouse/ClickHouse/pull/68836) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed asynchronous inserts in case when metadata of table is changed (by `ALTER ADD/MODIFY COLUMN` queries) after insert but before flush to the table. [#68837](https://github.com/ClickHouse/ClickHouse/pull/68837) ([Anton Popov](https://github.com/CurtizJ)).
* Fix unexpected exception when passing empty tuple in array. This fixes [#68618](https://github.com/ClickHouse/ClickHouse/issues/68618). [#68848](https://github.com/ClickHouse/ClickHouse/pull/68848) ([Amos Bird](https://github.com/amosbird)).
* Fix parsing pure metadata mutations commands. [#68935](https://github.com/ClickHouse/ClickHouse/pull/68935) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix possible wrong result during anyHeavy state merge. [#68950](https://github.com/ClickHouse/ClickHouse/pull/68950) ([Raúl Marín](https://github.com/Algunenano)).
* Fixed writing to Materialized Views with enabled setting `optimize_functions_to_subcolumns`. [#68951](https://github.com/ClickHouse/ClickHouse/pull/68951) ([Anton Popov](https://github.com/CurtizJ)).
* Don't use serializations cache in const Dynamic column methods. It could lead to use of an uninitialized value or even a race condition during aggregations. [#68953](https://github.com/ClickHouse/ClickHouse/pull/68953) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix parsing error when null should be inserted as default in some cases during JSON type parsing. [#68955](https://github.com/ClickHouse/ClickHouse/pull/68955) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `Content-Encoding` not sent in some compressed responses. [#64802](https://github.com/ClickHouse/ClickHouse/issues/64802). [#68975](https://github.com/ClickHouse/ClickHouse/pull/68975) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* There were cases when a path was concatenated incorrectly and contained a `//` part; this problem is solved using path normalization. [#69066](https://github.com/ClickHouse/ClickHouse/pull/69066) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix logical error when we have empty async insert. [#69080](https://github.com/ClickHouse/ClickHouse/pull/69080) ([Han Fei](https://github.com/hanfei1991)).
* Fixed data race of progress indication in clickhouse-client during query canceling. [#69081](https://github.com/ClickHouse/ClickHouse/pull/69081) ([Sergei Trifonov](https://github.com/serxa)).
* Fix a bug that the vector similarity index (currently experimental) was not utilized when used with cosine distance as distance function. [#69090](https://github.com/ClickHouse/ClickHouse/pull/69090) ([flynn](https://github.com/ucasfl)).
* This change addresses an issue where attempting to create a Replicated database again after a server failure during the initial creation process could result in error. [#69102](https://github.com/ClickHouse/ClickHouse/pull/69102) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Don't infer Bool type from String in CSV when `input_format_csv_try_infer_numbers_from_strings = 1` because we don't allow reading bool values from strings. [#69109](https://github.com/ClickHouse/ClickHouse/pull/69109) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix explain ast insert queries parsing errors on client when `--multiquery` is enabled. [#69123](https://github.com/ClickHouse/ClickHouse/pull/69123) ([wxybear](https://github.com/wxybear)).
* `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and lead to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix propagating the structure argument in s3Cluster. Previously the `DEFAULT` expression of the column could be lost when sending the query to the replicas in s3Cluster. [#69147](https://github.com/ClickHouse/ClickHouse/pull/69147) ([Kruglov Pavel](https://github.com/Avogar)).
* Respect format settings in Values format during conversion from expression to the destination type. [#69149](https://github.com/ClickHouse/ClickHouse/pull/69149) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `clickhouse-client --queries-file` for readonly users (previously fails with `Cannot modify 'log_comment' setting in readonly mode`). [#69175](https://github.com/ClickHouse/ClickHouse/pull/69175) ([Azat Khuzhin](https://github.com/azat)).
* Fix data race in clickhouse-client when it's piped to a process that terminated early. [#69186](https://github.com/ClickHouse/ClickHouse/pull/69186) ([vdimir](https://github.com/vdimir)).
* Fix incorrect results of `uniq` and GROUP BY for JSON/Dynamic types. [#69203](https://github.com/ClickHouse/ClickHouse/pull/69203) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix the INFILE format detection for asynchronous inserts. If the format is not explicitly defined in the FORMAT clause, it can be detected from the INFILE file extension. [#69237](https://github.com/ClickHouse/ClickHouse/pull/69237) ([Julia Kartseva](https://github.com/jkartseva)).
* After [this issue](https://github.com/ClickHouse/ClickHouse/pull/59946#issuecomment-1943653197) there are quite a few table replicas in production such that their `metadata_version` node value is both equal to `0` and is different from the respective table's `metadata` node version. This leads to `alter` queries failing on such replicas. [#69274](https://github.com/ClickHouse/ClickHouse/pull/69274) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Mark Dynamic type as not safe primary key type to avoid issues with Fields. [#69311](https://github.com/ClickHouse/ClickHouse/pull/69311) ([Kruglov Pavel](https://github.com/Avogar)).
* Improve restoring of access entities' dependencies. [#69346](https://github.com/ClickHouse/ClickHouse/pull/69346) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix undefined behavior when all connection attempts fail getting a connection for insertions. [#69390](https://github.com/ClickHouse/ClickHouse/pull/69390) ([Pablo Marcos](https://github.com/pamarcos)).
* Close [#69135](https://github.com/ClickHouse/ClickHouse/issues/69135). Reusing joined data for a `CROSS` join cannot currently happen in ClickHouse, but it's better to keep `have_compressed` in `reuseJoinedData`. [#69404](https://github.com/ClickHouse/ClickHouse/pull/69404) ([lgbo](https://github.com/lgbo-ustc)).
* Make `materialize()` function return full column when parameter is a sparse column. [#69429](https://github.com/ClickHouse/ClickHouse/pull/69429) ([Alexander Gololobov](https://github.com/davenger)).
* Fixed a `LOGICAL_ERROR` with function `sqidDecode` ([#69450](https://github.com/ClickHouse/ClickHouse/issues/69450)). [#69451](https://github.com/ClickHouse/ClickHouse/pull/69451) ([Robert Schulze](https://github.com/rschu1ze)).
* Quick fix for s3queue problem on 24.6 or create query with database replicated. [#69454](https://github.com/ClickHouse/ClickHouse/pull/69454) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed case when memory consumption was too high because of the squashing in `INSERT INTO ... SELECT` or `CREATE TABLE AS SELECT` queries. [#69469](https://github.com/ClickHouse/ClickHouse/pull/69469) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Statements `SHOW COLUMNS` and `SHOW INDEX` now work properly if the table has dots in its name. [#69514](https://github.com/ClickHouse/ClickHouse/pull/69514) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Usage of the query cache for queries with an overflow mode != 'throw' is now disallowed. This prevents situations where potentially truncated and incorrect query results could be stored in the query cache. (issue [#67476](https://github.com/ClickHouse/ClickHouse/issues/67476)). [#69549](https://github.com/ClickHouse/ClickHouse/pull/69549) ([Robert Schulze](https://github.com/rschu1ze)).
* Keep original order of conditions during move to prewhere. Previously the order could change and it could lead to failing queries when the order is important. [#69560](https://github.com/ClickHouse/ClickHouse/pull/69560) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix Keeper multi-request preprocessing after ZNOAUTH error. [#69627](https://github.com/ClickHouse/ClickHouse/pull/69627) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix METADATA_MISMATCH that might have happened due to TTL with a WHERE clause in DatabaseReplicated when creating a new replica. [#69736](https://github.com/ClickHouse/ClickHouse/pull/69736) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix `StorageS3(Azure)Queue` settings `tracked_file_ttl_sec`. We wrote it to keeper with key `tracked_file_ttl_sec`, but read as `tracked_files_ttl_sec`, which was a typo. [#69742](https://github.com/ClickHouse/ClickHouse/pull/69742) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Use `tryConvertFieldToType` in `getHyperrectangleForRowGroup`. [#69745](https://github.com/ClickHouse/ClickHouse/pull/69745) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Revert "Fix prewhere without columns and without adaptive index granularity (almost w/o anything)"'. Due to the reverted changes some errors might happen when reading data parts produced by old CH releases (presumably 2021 or older). [#68897](https://github.com/ClickHouse/ClickHouse/pull/68897) ([Alexander Gololobov](https://github.com/davenger)).

### <a id="248"></a> ClickHouse release 24.8 LTS, 2024-08-20

#### Backward Incompatible Change

* `clickhouse-client` and `clickhouse-local` now default to multi-query mode (instead of single-query mode). As an example, `clickhouse-client -q "SELECT 1; SELECT 2"` now works, whereas users previously had to add `--multiquery` (or `-n`). The `--multiquery/-n` switch became obsolete. INSERT queries in multi-query statements are treated specially based on their FORMAT clause: If the FORMAT is `VALUES` (the most common case), the end of the INSERT statement is represented by a trailing semicolon `;` at the end of the query. For all other FORMATs (e.g. `CSV` or `JSONEachRow`), the end of the INSERT statement is represented by two newlines `\n\n` at the end of the query. [#63898](https://github.com/ClickHouse/ClickHouse/pull/63898) ([FFish](https://github.com/wxybear)).
* In previous versions, it was possible to use an alternative syntax for `LowCardinality` data types by appending `WithDictionary` to the name of the data type. It was an initial working implementation, and it was never documented or exposed to the public. Now, it is deprecated. If you have used this syntax, you have to ALTER your tables and rename the data types to `LowCardinality`. [#66842](https://github.com/ClickHouse/ClickHouse/pull/66842) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix logical errors with storage `Buffer` used with distributed destination table. It's a backward incompatible change: queries using `Buffer` with a distributed destination table may stop working if the table appears more than once in the query (e.g., in a self-join). [#67015](https://github.com/ClickHouse/ClickHouse/pull/67015) ([vdimir](https://github.com/vdimir)).
* In previous versions, calling functions for random distributions based on the Gamma function (such as Chi-Squared, Student, Fisher) with negative arguments close to zero led to a long computation or an infinite loop. In the new version, calling these functions with zero or negative arguments will produce an exception. This closes [#67297](https://github.com/ClickHouse/ClickHouse/issues/67297). [#67326](https://github.com/ClickHouse/ClickHouse/pull/67326) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The system table `text_log` is enabled by default. This is fully compatible with previous versions, but you may notice subtly increased disk usage on the local disk (this system table takes a tiny amount of disk space). [#67428](https://github.com/ClickHouse/ClickHouse/pull/67428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, `arrayWithConstant` can be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix REPLACE modifier formatting (forbid omitting brackets). [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement `Dynamic` type. Now when the limit of dynamic data types is reached new types are not casted to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into `Dynamic` column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).

#### New Feature

* Added a new `MergeTree` setting `deduplicate_merge_projection_mode` to control the projections during merges (for specific engines) and `OPTIMIZE DEDUPLICATE` query. Supported options: `throw` (throw an exception in case the projection is not fully supported for *MergeTree engine), `drop` (remove projection during merge if it can't be merged itself consistently) and `rebuild` (rebuild projection from scratch, which is a heavy operation). [#66672](https://github.com/ClickHouse/ClickHouse/pull/66672) ([jsc0218](https://github.com/jsc0218)).
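
A minimal sketch of the new setting (the table, columns, and projection are illustrative):

```sql
CREATE TABLE dedup_demo
(
    key UInt64,
    value String,
    PROJECTION by_value (SELECT value, count() GROUP BY value)
)
ENGINE = ReplacingMergeTree
ORDER BY key
SETTINGS deduplicate_merge_projection_mode = 'rebuild';  -- or 'throw' (default) / 'drop'
```
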
* Add `_etag` virtual column for S3 table engine. Fixes [#65312](https://github.com/ClickHouse/ClickHouse/issues/65312). [#65386](https://github.com/ClickHouse/ClickHouse/pull/65386) ([skyoct](https://github.com/skyoct)).
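
A sketch of reading the new virtual column (the bucket URL is a placeholder, and the query assumes you have access to it):

```sql
SELECT _path, _etag
FROM s3('https://my-bucket.s3.amazonaws.com/data/*.parquet', Parquet)
LIMIT 5;
```
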
* Added a tagging (namespace) mechanism for the query cache. The same queries with different tags are considered different by the query cache. Example: `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'abc'` and `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'def'` now create different query cache entries. [#68235](https://github.com/ClickHouse/ClickHouse/pull/68235) ([sakulali](https://github.com/sakulali)).
|
||||
* Support more variants of JOIN strictness (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequality conditions which involve columns from both the left and right tables, e.g. `t1.y < t2.y` (see the setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)).
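A sketch of the extended strictness support, assuming illustrative tables `t1` and `t2` with columns `key` and `y`:

```sql
SET allow_experimental_join_condition = 1;

-- The inequality t1.y < t2.y references columns from both sides of the join.
SELECT t1.key, t1.y, t2.y
FROM t1
LEFT ANY JOIN t2 ON t1.key = t2.key AND t1.y < t2.y;
```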
|
||||
* Interpret Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. The follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
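A hedged sketch of reading Hive-partitioned data; the bucket layout, the virtual column name (`date`, matching the directory key), and the name of the enabling setting are assumptions for illustration:

```sql
SET use_hive_partitioning = 1;  -- assumed name of the enabling setting

-- Files laid out as .../date=2024-08-01/file.parquet expose a virtual `date` column.
SELECT date, count()
FROM s3('https://my-bucket.s3.amazonaws.com/data/date=*/*.parquet')
GROUP BY date;
```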
|
||||
* Add function `printf` for Spark compatibility (but you can use the existing `format` function). [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)).
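A small sketch contrasting the new function with the existing `format` alternative; the output comments are the expected results under C-style and `{}` placeholders, respectively:

```sql
SELECT printf('%s has %d columns', 'events', 5);  -- 'events has 5 columns'
SELECT format('{} has {} columns', 'events', 5);  -- same result with the existing function
```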
|
||||
* Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table functions with the `Null` engine, which can be useful for testing. It works for RESTORE and for explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)).
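A hedged sketch of using the options during a restore, assuming a backup stored on a configured `backups` disk:

```sql
-- External engines (e.g. MySQL, PostgreSQL) and external table functions in the
-- backed-up DDL are replaced with the Null engine, so no external servers are needed.
RESTORE DATABASE prod FROM Disk('backups', 'prod_backup')
SETTINGS restore_replace_external_engines_to_null = 1,
         restore_replace_external_table_functions_to_null = 1;
```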
|
||||
* Added support for reading `MULTILINESTRING` geometry in `WKT` format using function `readWKTLineString`. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)).
|
||||
* Add a new table function `fuzzQuery`. This function allows the modification of a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1') LIMIT 5;`. [#67655](https://github.com/ClickHouse/ClickHouse/pull/67655) ([pufit](https://github.com/pufit)).
|
||||
* Add a query `ALTER TABLE ... DROP DETACHED PARTITION ALL` to drop all detached partitions. [#67885](https://github.com/ClickHouse/ClickHouse/pull/67885) ([Duc Canh Le](https://github.com/canhld94)).
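A minimal sketch; the table name is illustrative, and the assumption is that the existing `allow_drop_detached` guard still applies:

```sql
ALTER TABLE events DROP DETACHED PARTITION ALL SETTINGS allow_drop_detached = 1;
```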
|
||||
* Add the `rows_before_aggregation_at_least` statistic to the query response when the new setting `rows_before_aggregation` is enabled. This statistic represents the number of rows read before aggregation. In the context of a distributed query, when using `GROUP BY` or the `max` aggregation function without a `LIMIT`, `rows_before_aggregation_at_least` can reflect the number of rows hit by the query. [#66084](https://github.com/ClickHouse/ClickHouse/pull/66084) ([morning-color](https://github.com/morning-color)).
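A hedged sketch of reading the new statistic, assuming it is reported in output formats that carry extended statistics such as `JSON`:

```sql
SELECT max(number)
FROM numbers_mt(100000)
GROUP BY number % 4
SETTINGS rows_before_aggregation = 1
FORMAT JSON;
-- The response statistics then include "rows_before_aggregation_at_least".
```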
|
||||
* Support `OPTIMIZE` query on `Join` tables to reduce their memory footprint. [#67883](https://github.com/ClickHouse/ClickHouse/pull/67883) ([Duc Canh Le](https://github.com/canhld94)).
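A minimal sketch with an illustrative `Join`-engine table:

```sql
CREATE TABLE lookup (k UInt64, v String) ENGINE = Join(ANY, LEFT, k);

-- Compacts the table's in-memory state to reduce its footprint.
OPTIMIZE TABLE lookup;
```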
|
||||
* Allow running a query instantly in the Play UI by adding `&run=1` to the URL. [#66457](https://github.com/ClickHouse/ClickHouse/pull/66457) ([Aleksandr Musorin](https://github.com/AVMusorin)).
|
||||
|
||||
#### Experimental Feature
|
||||
* Implement a new `JSON` data type. [#66444](https://github.com/ClickHouse/ClickHouse/pull/66444) ([Kruglov Pavel](https://github.com/Avogar)).
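A hedged sketch of the new type; the guard setting name and the subcolumn access syntax are assumptions for illustration:

```sql
SET allow_experimental_json_type = 1;  -- assumed guard setting for the experimental type

CREATE TABLE docs (id UInt64, data JSON) ENGINE = MergeTree ORDER BY id;
INSERT INTO docs VALUES (1, '{"user": {"name": "alice", "age": 30}}');
SELECT data.user.name FROM docs;  -- paths are read as subcolumns
```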
|
||||
* Add the new `TimeSeries` table engine. [#64183](https://github.com/ClickHouse/ClickHouse/pull/64183) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Add new experimental `Kafka` storage engine to store offsets in Keeper instead of relying on committing them to Kafka. It makes the commit to ClickHouse tables atomic with regard to consumption from the queue. [#57625](https://github.com/ClickHouse/ClickHouse/pull/57625) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||
* Use adaptive read task size calculation method (adaptive meaning it depends on read column sizes) for parallel replicas. [#60377](https://github.com/ClickHouse/ClickHouse/pull/60377) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Added statistics type `count_min` (count-min sketches) which provides selectivity estimations for equality predicates like `col = 'val'`. Supported data types are string, date, datetime, and numeric types. [#65521](https://github.com/ClickHouse/ClickHouse/pull/65521) ([JackyWoo](https://github.com/JackyWoo)).
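A hedged sketch of declaring the statistic on a column; the `STATISTICS(...)` column clause and the `allow_experimental_statistics` guard are assumptions based on how other statistics types are declared:

```sql
SET allow_experimental_statistics = 1;  -- assumed guard setting

CREATE TABLE hits
(
    user_id UInt64,
    browser String STATISTICS(count_min)
)
ENGINE = MergeTree
ORDER BY user_id;

-- The sketch helps estimate the selectivity of equality predicates like this one.
SELECT count() FROM hits WHERE browser = 'firefox';
```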
|
||||
|
||||
#### Performance Improvement
|
||||
* Setting `optimize_functions_to_subcolumns` is enabled by default. [#68053](https://github.com/ClickHouse/ClickHouse/pull/68053) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Store the `plain_rewritable` disk directory metadata in `__meta` layout, separately from the merge tree data in the object storage. Move the `plain_rewritable` disk to a flat directory structure. [#65751](https://github.com/ClickHouse/ClickHouse/pull/65751) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||
* Improve columns squashing (an operation happening in INSERT queries) for `String`/`Array`/`Map`/`Variant`/`Dynamic` types by reserving required memory in advance for all subcolumns. [#67043](https://github.com/ClickHouse/ClickHouse/pull/67043) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Speed up `SYSTEM FLUSH LOGS` and flush logs on shutdown. [#67472](https://github.com/ClickHouse/ClickHouse/pull/67472) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Improved overall performance of merges by reducing the overhead of the scheduling steps of merges. [#68016](https://github.com/ClickHouse/ClickHouse/pull/68016) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Speed up table removal for the `DROP DATABASE` query; the default value of `database_catalog_drop_table_concurrency` was increased to 16. [#67228](https://github.com/ClickHouse/ClickHouse/pull/67228) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Avoid allocating too much capacity for array columns while writing ORC; this speeds up writing an `Array` column by about 15%. [#67879](https://github.com/ClickHouse/ClickHouse/pull/67879) ([李扬](https://github.com/taiyang-li)).
|
||||
* Speed up mutations for non-replicated MergeTree significantly [#66911](https://github.com/ClickHouse/ClickHouse/pull/66911) [#66909](https://github.com/ClickHouse/ClickHouse/pull/66909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
||||
#### Improvement
|
||||
* Setting `allow_experimental_analyzer` is renamed to `enable_analyzer`. The old name is preserved in the form of an alias. This signifies that the Analyzer is no longer in beta and is fully promoted to production. [#66438](https://github.com/ClickHouse/ClickHouse/pull/66438) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Improve schema inference of date times. Now `DateTime64` is used only when the date time has a fractional part; otherwise regular `DateTime` is used. Inference of `Date`/`DateTime` is stricter now, especially when `date_time_input_format='best_effort'`, to avoid inferring date times from strings in corner cases. [#68382](https://github.com/ClickHouse/ClickHouse/pull/68382) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* ClickHouse server now supports a new setting `max_keep_alive_requests`. For keep-alive HTTP connections to the server, it works in tandem with `keep_alive_timeout`: even if the idle timeout has not expired, the server closes a connection once more than `max_keep_alive_requests` requests have been served through it. [#61793](https://github.com/ClickHouse/ClickHouse/pull/61793) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Various improvements in the advanced dashboard. This closes [#67697](https://github.com/ClickHouse/ClickHouse/issues/67697). This closes [#63407](https://github.com/ClickHouse/ClickHouse/issues/63407). This closes [#51129](https://github.com/ClickHouse/ClickHouse/issues/51129). This closes [#61204](https://github.com/ClickHouse/ClickHouse/issues/61204). [#67701](https://github.com/ClickHouse/ClickHouse/pull/67701) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Do not require a grant for REMOTE when creating a Distributed table: a grant for the Distributed engine is enough. [#65419](https://github.com/ClickHouse/ClickHouse/pull/65419) ([jsc0218](https://github.com/jsc0218)).
|
||||
* Do not pass logs for keeper explicitly in the Docker image to allow overriding. [#65564](https://github.com/ClickHouse/ClickHouse/pull/65564) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Introduced the `use_same_password_for_base_backup` setting for `BACKUP` and `RESTORE` queries, allowing incremental backups to be created and restored to/from password-protected archives. [#66214](https://github.com/ClickHouse/ClickHouse/pull/66214) ([Samuele](https://github.com/sguerrini97)).
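A hedged sketch of an incremental backup into a password-protected archive; the disk name and file names are placeholders:

```sql
BACKUP TABLE events TO Disk('backups', 'incremental.zip')
SETTINGS base_backup = Disk('backups', 'base.zip'),
         password = 'secret',
         use_same_password_for_base_backup = 1;
```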
|
||||
* Ignore `async_load_databases` for `ATTACH` query (previously it was possible for ATTACH to return before the tables had been attached). [#66240](https://github.com/ClickHouse/ClickHouse/pull/66240) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Added logs and metrics for rejected connections (where there are not enough resources). [#66410](https://github.com/ClickHouse/ClickHouse/pull/66410) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Support proper `UUID` type for MongoDB engine. [#66671](https://github.com/ClickHouse/ClickHouse/pull/66671) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Add replication lag and recovery time metrics. [#66703](https://github.com/ClickHouse/ClickHouse/pull/66703) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
|
||||
* Add `DiskS3NoSuchKeyErrors` metric. [#66704](https://github.com/ClickHouse/ClickHouse/pull/66704) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
|
||||
* Ensure the `COMMENT` clause works for all table engines. [#66832](https://github.com/ClickHouse/ClickHouse/pull/66832) ([Joe Lynch](https://github.com/joelynch)).
|
||||
* Function `mapFromArrays` now accepts `Map(K, V)` as first argument, for example: `SELECT mapFromArrays(map('a', 4, 'b', 4), ['aa', 'bb'])` now works and returns `{('a',4):'aa',('b',4):'bb'}`. Also, if the 1st argument is an Array, it can now also be of type `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as long as the actual array values are not `NULL`. [#67103](https://github.com/ClickHouse/ClickHouse/pull/67103) ([李扬](https://github.com/taiyang-li)).
|
||||
* Read configuration for `clickhouse-local` from `~/.clickhouse-local`. [#67135](https://github.com/ClickHouse/ClickHouse/pull/67135) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Rename setting `input_format_orc_read_use_writer_time_zone` to `input_format_orc_reader_timezone` and allow the user to set the reader timezone. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)).
|
||||
* Decrease the log level of the `Socket is not connected` error when an HTTP connection is immediately reset by the peer after connecting. Closes [#34218](https://github.com/ClickHouse/ClickHouse/issues/34218). [#67177](https://github.com/ClickHouse/ClickHouse/pull/67177) ([vdimir](https://github.com/vdimir)).
|
||||
* Add the ability to load dashboards for `system.dashboards` from config (once set, they override the default dashboard preset). [#67232](https://github.com/ClickHouse/ClickHouse/pull/67232) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Window functions in SQL are traditionally named in snake case, while ClickHouse uses `camelCase`, so new aliases `denseRank()` and `percentRank()` have been created. These new functions can be called exactly like the original `dense_rank()` and `percent_rank()` functions; both snake_case and camelCase spellings remain usable. A new test for each of the functions has been added as well. This closes [#67042](https://github.com/ClickHouse/ClickHouse/issues/67042). [#67334](https://github.com/ClickHouse/ClickHouse/pull/67334) ([Peter Nguyen](https://github.com/petern48)).
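A small sketch showing that both spellings can be used interchangeably:

```sql
SELECT
    number,
    dense_rank() OVER (ORDER BY number) AS snake,
    denseRank()  OVER (ORDER BY number) AS camel
FROM numbers(5);
```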
|
||||
* Autodetect the configuration file format if its extension is not `.xml`, `.yml`, or `.yaml`. If the file begins with `<`, it might be XML; otherwise it might be YAML. This is useful when providing a configuration file from a pipe: `clickhouse-server --config-file <(echo "hello: world")`. [#67391](https://github.com/ClickHouse/ClickHouse/pull/67391) ([sakulali](https://github.com/sakulali)).
|
||||
* Functions `parseDateTime` and `parseDateTimeInJodaSyntax` now treat their format parameter as optional. If it is not specified, format strings `%Y-%m-%d %H:%i:%s` and `yyyy-MM-dd HH:mm:ss` are assumed. Example: `SELECT parseDateTime('2021-01-04 23:12:34')` now returns DateTime value `2021-01-04 23:12:34` (previously, this threw an exception). [#67399](https://github.com/ClickHouse/ClickHouse/pull/67399) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Automatically retry Keeper requests in KeeperMap if they happen because of timeout or connection loss. [#67448](https://github.com/ClickHouse/ClickHouse/pull/67448) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Add `-no-pie` to Aarch64 Linux builds to allow proper introspection and symbolizing of stacktraces after a ClickHouse restart. [#67916](https://github.com/ClickHouse/ClickHouse/pull/67916) ([filimonov](https://github.com/filimonov)).
|
||||
* Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Remove unnecessary logs for non-replicated `MergeTree`. [#68238](https://github.com/ClickHouse/ClickHouse/pull/68238) ([Daniil Ivanik](https://github.com/divanik)).
|
||||
|
||||
#### Build/Testing/Packaging Improvement
|
||||
* The integration tests flaky check will now run each test case multiple times to find more issues in tests and make them more reliable. It uses the `pytest-repeat` library to run a test case multiple times within the same environment. It is important to clean up tables and other entities at the end of a test case for it to pass. Repeating works much faster than several pytest runs as it starts the necessary containers only once. [#66986](https://github.com/ClickHouse/ClickHouse/pull/66986) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
* Unblock the usage of CLion with ClickHouse. In previous versions, CLion froze for a minute on every keypress. This closes [#66994](https://github.com/ClickHouse/ClickHouse/issues/66994). [#66995](https://github.com/ClickHouse/ClickHouse/pull/66995) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* getauxval: avoid a crash under a sanitizer re-exec due to high ASLR entropy in newer Linux kernels. [#67081](https://github.com/ClickHouse/ClickHouse/pull/67081) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Some parts of the client code were extracted into a single file, and the highest possible optimization level is applied to them even in debug builds. This closes: [#65745](https://github.com/ClickHouse/ClickHouse/issues/65745). [#67215](https://github.com/ClickHouse/ClickHouse/pull/67215) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
|
||||
#### Bug Fix
|
||||
* Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||
* Fix crash of `uniq` and `uniqTheta` with a `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)).
|
||||
* Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
|
||||
* Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Fixed reading of subcolumns after `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
|
||||
* Fix formatting of query with aliased JOIN ON expression, e.g. `... JOIN t2 ON (x = y) AS e ORDER BY x` should be formatted as `... JOIN t2 ON ((x = y) AS e) ORDER BY x`. [#66312](https://github.com/ClickHouse/ClickHouse/pull/66312) ([vdimir](https://github.com/vdimir)).
|
||||
* Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix possible runtime error while converting Array field with nulls to Array(Variant). [#66727](https://github.com/ClickHouse/ClickHouse/pull/66727) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Fix creating KeeperMap table after an incomplete drop. [#66865](https://github.com/ClickHouse/ClickHouse/pull/66865) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix broken part error while restoring to a `s3_plain_rewritable` disk. [#66881](https://github.com/ClickHouse/ClickHouse/pull/66881) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
|
||||
* Fix invalid format detection in schema inference that could lead to the logical error `Format {} doesn't support schema inference`. [#66899](https://github.com/ClickHouse/ClickHouse/pull/66899) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix possible deadlock on query cancel with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Forbid `CREATE AS SELECT` even when `database_replicated_allow_heavy_create` is set. It was unconditionally forbidden in 23.12 and accidentally allowed under the setting in unreleased 24.7. [#66980](https://github.com/ClickHouse/ClickHouse/pull/66980) ([vdimir](https://github.com/vdimir)).
|
||||
* Reading from the `numbers` table could wrongly throw an exception when the `max_rows_to_read` limit was set. This closes [#66992](https://github.com/ClickHouse/ClickHouse/issues/66992). [#66996](https://github.com/ClickHouse/ClickHouse/pull/66996) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add proper type conversion to lagInFrame and leadInFrame window functions - fixes msan test. [#67091](https://github.com/ClickHouse/ClickHouse/pull/67091) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Use a separate client context in `clickhouse-local`. [#67133](https://github.com/ClickHouse/ClickHouse/pull/67133) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Correct behavior of `ORDER BY all` with disabled `enable_order_by_all` and parallel replicas (distributed queries as well). [#67153](https://github.com/ClickHouse/ClickHouse/pull/67153) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Fix wrong usage of input_format_max_bytes_to_read_for_schema_inference in schema cache. [#67157](https://github.com/ClickHouse/ClickHouse/pull/67157) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix a memory leak in count-distinct aggregation when an exception is issued during `GROUP BY` over a single nullable key. [#67171](https://github.com/ClickHouse/ClickHouse/pull/67171) ([Jet He](https://github.com/compasses)).
|
||||
* Fix an error in optimization which converts OUTER JOIN to INNER JOIN. This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Fix error `Conversion from AggregateFunction(name, Type) to AggregateFunction(name, Nullable(Type)) is not supported`. The bug was caused by the `optimize_rewrite_aggregate_function_with_if` optimization. Fixes [#67112](https://github.com/ClickHouse/ClickHouse/issues/67112). [#67229](https://github.com/ClickHouse/ClickHouse/pull/67229) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix a hanging query when using an empty tuple as the left-hand side of the `IN` function. [#67295](https://github.com/ClickHouse/ClickHouse/pull/67295) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* It was possible to create a very deep nested JSON data that triggered stack overflow while skipping unknown fields. This closes [#67292](https://github.com/ClickHouse/ClickHouse/issues/67292). [#67324](https://github.com/ClickHouse/ClickHouse/pull/67324) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix attaching ReplicatedMergeTree table after exception during startup. [#67360](https://github.com/ClickHouse/ClickHouse/pull/67360) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix segfault caused by incorrectly detaching from thread group in `Aggregator`. [#67385](https://github.com/ClickHouse/ClickHouse/pull/67385) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix one more case when a non-deterministic function is specified in PK. [#67395](https://github.com/ClickHouse/ClickHouse/pull/67395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fixed `bloom_filter` index breaking queries with mildly weird conditions like `(k=2)=(k=2)` or `has([1,2,3], k)`. [#67423](https://github.com/ClickHouse/ClickHouse/pull/67423) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix wait for tasks in ~WriteBufferFromS3 in case WriteBuffer was cancelled. [#67459](https://github.com/ClickHouse/ClickHouse/pull/67459) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Protect temporary part directories from removing during RESTORE. [#67491](https://github.com/ClickHouse/ClickHouse/pull/67491) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix `Logical error: Expected the argument №N of type T to have X rows, but it has 0`. The error could happen in a remote query with constant expression in `GROUP BY` (with a new analyzer). [#67536](https://github.com/ClickHouse/ClickHouse/pull/67536) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix join on tuple with NULLs: Some queries with the new analyzer and `NULL` inside the tuple in the `JOIN ON` section returned incorrect results. [#67538](https://github.com/ClickHouse/ClickHouse/pull/67538) ([vdimir](https://github.com/vdimir)).
|
||||
* Fix redundant reschedule of FileCache::freeSpaceRatioKeepingThreadFunc() in case of full non-evictable cache. [#67540](https://github.com/ClickHouse/ClickHouse/pull/67540) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||
* Fix for function `toStartOfWeek` which returned the wrong result with a small `DateTime64` value. [#67558](https://github.com/ClickHouse/ClickHouse/pull/67558) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* Fix creation of view with recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix `Logical error: 'file_offset_of_buffer_end <= read_until_position'` in filesystem cache. Closes [#57508](https://github.com/ClickHouse/ClickHouse/issues/57508). [#67623](https://github.com/ClickHouse/ClickHouse/pull/67623) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fixes [#62282](https://github.com/ClickHouse/ClickHouse/issues/62282). Removed the call to `convertFieldToString()` and added datatype specific serialization code. Parameterized view substitution was broken for multiple datatypes when parameter value was a function or expression returning datatype instance. [#67654](https://github.com/ClickHouse/ClickHouse/pull/67654) ([Shankar](https://github.com/shiyer7474)).
|
||||
* Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is now taken into account, so window functions without a window frame definition in SQL can be placed into different `WindowTransformer`s properly. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix a possible logical error "Unexpected return type from if" with the experimental Variant type and the enabled setting `use_variant_as_common_type` in the function `if` with Tuples and Maps. [#67687](https://github.com/ClickHouse/ClickHouse/pull/67687) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Due to a bug in the Linux kernel, a query could hang in `TimerDescriptor::drain`. This closes [#37686](https://github.com/ClickHouse/ClickHouse/issues/37686). [#67702](https://github.com/ClickHouse/ClickHouse/pull/67702) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix completion of `RESTORE ON CLUSTER` command. [#67720](https://github.com/ClickHouse/ClickHouse/pull/67720) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix dictionary hang in case of CANNOT_SCHEDULE_TASK while loading. [#67751](https://github.com/ClickHouse/ClickHouse/pull/67751) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Queries like `SELECT count() FROM t WHERE cast(c = 1 or c = 9999 AS Bool) SETTINGS use_skip_indexes=1` with bloom filter indexes on `c` now work correctly. [#67781](https://github.com/ClickHouse/ClickHouse/pull/67781) ([jsc0218](https://github.com/jsc0218)).
|
||||
* Fix wrong aggregation result in some queries with aggregation without keys and filter, close [#67419](https://github.com/ClickHouse/ClickHouse/issues/67419). [#67804](https://github.com/ClickHouse/ClickHouse/pull/67804) ([vdimir](https://github.com/vdimir)).
|
||||
* Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix DateTime64 parsing after constant folding in distributed queries, close [#66773](https://github.com/ClickHouse/ClickHouse/issues/66773). [#67920](https://github.com/ClickHouse/ClickHouse/pull/67920) ([vdimir](https://github.com/vdimir)).
|
||||
* Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||
* Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Now ClickHouse doesn't consider part as broken if projection doesn't exist on disk but exists in `checksums.txt`. [#68003](https://github.com/ClickHouse/ClickHouse/pull/68003) ([alesapin](https://github.com/alesapin)).
|
||||
* Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to its predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
|
||||
* Attempt to fix `Block structure mismatch in AggregatingStep stream: different types` for aggregate projection optimization. [#68107](https://github.com/ClickHouse/ClickHouse/pull/68107) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Try to fix a PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
|
||||
|
||||
### <a id="247"></a> ClickHouse release 24.7, 2024-07-30
|
||||
|
||||
#### Backward Incompatible Change
|
||||
@ -19,6 +488,7 @@
|
||||
* Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Change how deduplication for Materialized Views works. Fixed a lot of cases, such as: on the destination table, data split into two or more blocks being considered duplicates when those blocks are inserted in parallel; on the MV destination table, equal blocks being deduplicated (which happens when the MV often produces equal data for different input data due to aggregation); on the MV destination table, equal blocks coming from different MVs being deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions. [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||
|
||||
#### New Feature
|
||||
* Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
|
||||
@ -130,7 +600,6 @@
|
||||
* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out-of-bounds [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||
* Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)).
|
||||
* Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`); fix an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
|
||||
* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions. [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||
* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)).
|
||||
* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)).
|
||||
|
31
CITATION.cff
Normal file
@ -0,0 +1,31 @@
|
||||
# This CITATION.cff file was generated with cffinit.
|
||||
|
||||
cff-version: 1.2.0
|
||||
title: "ClickHouse"
|
||||
message: "If you use this software, please cite it as below."
|
||||
type: software
|
||||
authors:
|
||||
- family-names: "Milovidov"
|
||||
given-names: "Alexey"
|
||||
repository-code: 'https://github.com/ClickHouse/ClickHouse'
|
||||
url: 'https://clickhouse.com'
|
||||
license: Apache-2.0
|
||||
preferred-citation:
|
||||
type: article
|
||||
authors:
|
||||
- family-names: "Schulze"
|
||||
given-names: "Robert"
|
||||
- family-names: "Schreiber"
|
||||
given-names: "Tom"
|
||||
- family-names: "Yatsishin"
|
||||
given-names: "Ilya"
|
||||
- family-names: "Dahimene"
|
||||
given-names: "Ryadh"
|
||||
- family-names: "Milovidov"
|
||||
given-names: "Alexey"
|
||||
journal: "Proceedings of the VLDB Endowment"
|
||||
title: "ClickHouse - Lightning Fast Analytics for Everyone"
|
||||
year: 2024
|
||||
volume: 17
|
||||
issue: 12
|
||||
doi: 10.14778/3685800.3685802
|
@ -88,6 +88,7 @@ string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
|
||||
list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
|
||||
|
||||
option (ENABLE_FUZZING "Fuzzy testing using libfuzzer" OFF)
|
||||
option (ENABLE_FUZZER_TEST "Build testing fuzzers in order to test libFuzzer functionality" OFF)
|
||||
|
||||
if (ENABLE_FUZZING)
|
||||
# Also set WITH_COVERAGE=1 for better fuzzing process
|
||||
@ -187,14 +188,6 @@ else ()
|
||||
set(NO_WHOLE_ARCHIVE --no-whole-archive)
|
||||
endif ()
|
||||
|
||||
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
|
||||
# Can be lld or ld-lld or lld-13 or /path/to/lld.
|
||||
if (LINKER_NAME MATCHES "lld")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index")
|
||||
message (STATUS "Adding .gdb-index via --gdb-index linker option.")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
if (NOT (SANITIZE_COVERAGE OR WITH_COVERAGE)
|
||||
AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE"
|
||||
OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
|
||||
@ -330,20 +323,23 @@ if (DISABLE_OMIT_FRAME_POINTER)
|
||||
set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer")
|
||||
endif()
|
||||
|
||||
# Before you start hating your debugger because it refuses to show variables ('<optimized out>'), try building with -DDEBUG_O_LEVEL="0"
|
||||
# https://stackoverflow.com/questions/63386189/whats-the-difference-between-a-compilers-o0-option-and-og-option/63386263#63386263
|
||||
set(DEBUG_O_LEVEL "g" CACHE STRING "The -Ox level used for debug builds")
|
||||
|
||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
||||
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
||||
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
||||
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
||||
|
||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
||||
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
||||
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
||||
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
||||
|
||||
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
||||
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
||||
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
||||
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
||||
|
||||
if (OS_DARWIN)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
|
||||
endif()
|
||||
|
||||
@ -402,7 +398,7 @@ if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
|
||||
set(ENABLE_GWP_ASAN OFF)
|
||||
endif ()
|
||||
|
||||
option (ENABLE_FIU "Enable Fiu" ON)
|
||||
option (ENABLE_LIBFIU "Enable libfiu" ON)
|
||||
|
||||
option(WERROR "Enable -Werror compiler option" ON)
|
||||
|
||||
@ -428,12 +424,17 @@ if (NOT SANITIZE)
|
||||
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
|
||||
endif()
|
||||
|
||||
if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X) AND NOT SANITIZE)
|
||||
# Slightly more efficient code can be generated
|
||||
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
|
||||
if (NOT OS_ANDROID AND OS_LINUX AND NOT ARCH_S390X AND NOT SANITIZE)
|
||||
# Using '-no-pie' builds executables with fixed addresses, resulting in slightly more efficient code
|
||||
# and keeping binary addresses constant even with ASLR enabled.
|
||||
# Disabled on Android as it requires PIE: https://source.android.com/docs/security/enhancements#android-5
|
||||
# Disabled on IBM S390X due to build issues with 'no-pie'
|
||||
# Disabled with sanitizers to avoid issues with maximum relocation size: https://github.com/ClickHouse/ClickHouse/pull/49145
|
||||
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
|
||||
else ()
|
||||
message (WARNING "ClickHouse is built as PIE, system.trace_log will contain invalid addresses after server restart.")
|
||||
endif ()
|
||||
|
||||
if (ENABLE_TESTS)
|
||||
@ -545,7 +546,7 @@ endif()
|
||||
|
||||
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
|
||||
AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND NOT ENABLE_FUZZING
|
||||
AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
|
||||
AND OMIT_HEAVY_DEBUG_SYMBOLS AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
|
||||
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON)
|
||||
else ()
|
||||
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF)
|
||||
@ -604,7 +605,9 @@ if (NATIVE_BUILD_TARGETS
|
||||
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}"
|
||||
COMMAND_ECHO STDOUT)
|
||||
COMMAND_ECHO STDOUT
|
||||
COMMAND_ERROR_IS_FATAL ANY
|
||||
)
|
||||
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_COMMAND}
|
||||
@ -616,9 +619,13 @@ if (NATIVE_BUILD_TARGETS
|
||||
"-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
|
||||
${PROJECT_SOURCE_DIR}
|
||||
WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
|
||||
COMMAND_ECHO STDOUT)
|
||||
COMMAND_ECHO STDOUT
|
||||
COMMAND_ERROR_IS_FATAL ANY
|
||||
)
|
||||
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_COMMAND} --build "${NATIVE_BUILD_DIR}" --target ${NATIVE_BUILD_TARGETS}
|
||||
COMMAND_ECHO STDOUT)
|
||||
COMMAND_ECHO STDOUT
|
||||
COMMAND_ERROR_IS_FATAL ANY
|
||||
)
|
||||
endif ()
|
||||
|
@ -51,8 +51,14 @@ if (NOT "$ENV{CFLAGS}" STREQUAL ""
|
||||
endif()
|
||||
|
||||
# Default toolchain - this is needed to avoid dependency on OS files.
|
||||
execute_process(COMMAND uname -s OUTPUT_VARIABLE OS)
|
||||
execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCH)
|
||||
execute_process(COMMAND uname -s
|
||||
OUTPUT_VARIABLE OS
|
||||
COMMAND_ERROR_IS_FATAL ANY
|
||||
)
|
||||
execute_process(COMMAND uname -m
|
||||
OUTPUT_VARIABLE ARCH
|
||||
COMMAND_ERROR_IS_FATAL ANY
|
||||
)
|
||||
|
||||
# By default, prefer clang on Linux
|
||||
# But note, that you still may change the compiler with -DCMAKE_C_COMPILER/-DCMAKE_CXX_COMPILER.
|
||||
|
23
README.md
@ -27,6 +27,7 @@ curl https://clickhouse.com/ | sh
|
||||
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
|
||||
* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
|
||||
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
|
||||
* [Bluesky](https://bsky.app/profile/clickhouse.com) and [X](https://x.com/ClickHouseDB) for short news.
|
||||
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
|
||||
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
|
||||
|
||||
@ -34,17 +35,33 @@ curl https://clickhouse.com/ | sh
|
||||
|
||||
Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
|
||||
|
||||
* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 29
|
||||
* [v24.10 Community Call](https://clickhouse.com/company/events/v24-10-community-release-call) - October 31
|
||||
|
||||
## Upcoming Events
|
||||
|
||||
Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
|
||||
|
||||
* MORE COMING SOON!
|
||||
Upcoming meetups
|
||||
|
||||
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
|
||||
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
|
||||
* [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3
|
||||
* [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9
|
||||
* [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9
|
||||
* [Kuala Lumpur Meetup](https://www.meetup.com/clickhouse-malaysia-meetup-group/events/304576472/) - December 11
|
||||
* [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12
|
||||
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - Feb 3
|
||||
|
||||
Recently completed meetups
|
||||
|
||||
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
|
||||
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
|
||||
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
|
||||
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
|
||||
|
||||
## Recent Recordings
|
||||
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
|
||||
* **Recording available**: [**v24.4 Release Call**](https://www.youtube.com/watch?v=dtUqgcfOGmE) All the features of 24.4, one convenient video! Watch it now!
|
||||
* **Recording available**: [**v24.8 LTS Release Call**](https://www.youtube.com/watch?v=AeLmp2jc51k) All the features of 24.8 LTS, one convenient video! Watch it now!
|
||||
|
||||
## Interested in joining ClickHouse and making it your full-time job?
|
||||
|
||||
|
22
SECURITY.md
@ -14,25 +14,17 @@ The following versions of ClickHouse server are currently supported with securit
|
||||
|
||||
| Version | Supported |
|
||||
|:-|:-|
|
||||
| 24.7 | ✔️ |
|
||||
| 24.6 | ✔️ |
|
||||
| 24.5 | ✔️ |
|
||||
| 24.10 | ✔️ |
|
||||
| 24.9 | ✔️ |
|
||||
| 24.8 | ✔️ |
|
||||
| 24.7 | ❌ |
|
||||
| 24.6 | ❌ |
|
||||
| 24.5 | ❌ |
|
||||
| 24.4 | ❌ |
|
||||
| 24.3 | ✔️ |
|
||||
| 24.2 | ❌ |
|
||||
| 24.1 | ❌ |
|
||||
| 23.12 | ❌ |
|
||||
| 23.11 | ❌ |
|
||||
| 23.10 | ❌ |
|
||||
| 23.9 | ❌ |
|
||||
| 23.8 | ✔️ |
|
||||
| 23.7 | ❌ |
|
||||
| 23.6 | ❌ |
|
||||
| 23.5 | ❌ |
|
||||
| 23.4 | ❌ |
|
||||
| 23.3 | ❌ |
|
||||
| 23.2 | ❌ |
|
||||
| 23.1 | ❌ |
|
||||
| 23.* | ❌ |
|
||||
| 22.* | ❌ |
|
||||
| 21.* | ❌ |
|
||||
| 20.* | ❌ |
|
||||
|
313
base/base/BFloat16.h
Normal file
@ -0,0 +1,313 @@
|
||||
#pragma once
|
||||
|
||||
#include <bit>
|
||||
#include <base/types.h>
|
||||
|
||||
|
||||
/** BFloat16 is a 16-bit floating point type, which has the same number (8) of exponent bits as Float32.
|
||||
* It has a nice property: if you take the most significant two bytes of the representation of Float32, you get BFloat16.
|
||||
* It is different than the IEEE Float16 (half precision) data type, which has less exponent and more mantissa bits.
|
||||
*
|
||||
 * It is popular in AI applications, such as running quantized models and doing vector search,
|
||||
* where the range of the data type is more important than its precision.
|
||||
*
|
||||
 * It has also recently gained good hardware support in GPUs, as well as in x86-64 and AArch64 CPUs, including SIMD instructions.
|
||||
* But it is rarely utilized by compilers.
|
||||
*
|
||||
* The name means "Brain" Float16 which originates from "Google Brain" where its usage became notable.
|
||||
* It is also known under the name "bf16". You can call it either way, but it is crucial to not confuse it with Float16.
|
||||
|
||||
* Here is a manual implementation of this data type. Only required operations are implemented.
|
||||
* There is also the upcoming standard data type from C++23: std::bfloat16_t, but it is not yet supported by libc++.
|
||||
* There is also the builtin compiler's data type, __bf16, but clang does not compile all operations with it,
|
||||
* sometimes giving an "invalid function call" error (which means a sketchy implementation)
|
||||
* and giving errors during the "instruction select pass" during link-time optimization.
|
||||
*
|
||||
* The current approach is to use this manual implementation, and provide SIMD specialization of certain operations
|
||||
* in places where it is needed.
|
||||
*/
|
||||
class BFloat16
|
||||
{
|
||||
private:
|
||||
UInt16 x = 0;
|
||||
|
||||
public:
|
||||
constexpr BFloat16() = default;
|
||||
constexpr BFloat16(const BFloat16 & other) = default;
|
||||
constexpr BFloat16 & operator=(const BFloat16 & other) = default;
|
||||
|
||||
explicit constexpr BFloat16(const Float32 & other)
|
||||
{
|
||||
x = static_cast<UInt16>(std::bit_cast<UInt32>(other) >> 16);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
explicit constexpr BFloat16(const T & other)
|
||||
: BFloat16(Float32(other))
|
||||
{
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
constexpr BFloat16 & operator=(const T & other)
|
||||
{
|
||||
*this = BFloat16(other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
explicit constexpr operator Float32() const
|
||||
{
|
||||
return std::bit_cast<Float32>(static_cast<UInt32>(x) << 16);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
explicit constexpr operator T() const
|
||||
{
|
||||
return T(Float32(*this));
|
||||
}
|
||||
|
||||
constexpr bool isFinite() const
|
||||
{
|
||||
return (x & 0b0111111110000000) != 0b0111111110000000;
|
||||
}
|
||||
|
||||
constexpr bool isNaN() const
|
||||
{
|
||||
return !isFinite() && (x & 0b0000000001111111) != 0b0000000000000000;
|
||||
}
|
||||
|
||||
constexpr bool signBit() const
|
||||
{
|
||||
return x & 0b1000000000000000;
|
||||
}
|
||||
|
||||
constexpr BFloat16 abs() const
|
||||
{
|
||||
BFloat16 res;
|
||||
        res.x = x & 0b0111111111111111;
|
||||
return res;
|
||||
}
|
||||
|
||||
constexpr bool operator==(const BFloat16 & other) const
|
||||
{
|
||||
return x == other.x;
|
||||
}
|
||||
|
||||
constexpr bool operator!=(const BFloat16 & other) const
|
||||
{
|
||||
return x != other.x;
|
||||
}
|
||||
|
||||
constexpr BFloat16 operator+(const BFloat16 & other) const
|
||||
{
|
||||
return BFloat16(Float32(*this) + Float32(other));
|
||||
}
|
||||
|
||||
constexpr BFloat16 operator-(const BFloat16 & other) const
|
||||
{
|
||||
return BFloat16(Float32(*this) - Float32(other));
|
||||
}
|
||||
|
||||
constexpr BFloat16 operator*(const BFloat16 & other) const
|
||||
{
|
||||
return BFloat16(Float32(*this) * Float32(other));
|
||||
}
|
||||
|
||||
constexpr BFloat16 operator/(const BFloat16 & other) const
|
||||
{
|
||||
return BFloat16(Float32(*this) / Float32(other));
|
||||
}
|
||||
|
||||
constexpr BFloat16 & operator+=(const BFloat16 & other)
|
||||
{
|
||||
*this = *this + other;
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr BFloat16 & operator-=(const BFloat16 & other)
|
||||
{
|
||||
*this = *this - other;
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr BFloat16 & operator*=(const BFloat16 & other)
|
||||
{
|
||||
*this = *this * other;
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr BFloat16 & operator/=(const BFloat16 & other)
|
||||
{
|
||||
*this = *this / other;
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr BFloat16 operator-() const
|
||||
{
|
||||
BFloat16 res;
|
||||
res.x = x ^ 0b1000000000000000;
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator==(const BFloat16 & a, const T & b)
|
||||
{
|
||||
return Float32(a) == b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator==(const T & a, const BFloat16 & b)
|
||||
{
|
||||
return a == Float32(b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator!=(const BFloat16 & a, const T & b)
|
||||
{
|
||||
return Float32(a) != b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator!=(const T & a, const BFloat16 & b)
|
||||
{
|
||||
return a != Float32(b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator<(const BFloat16 & a, const T & b)
|
||||
{
|
||||
return Float32(a) < b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator<(const T & a, const BFloat16 & b)
|
||||
{
|
||||
return a < Float32(b);
|
||||
}
|
||||
|
||||
constexpr inline bool operator<(BFloat16 a, BFloat16 b)
|
||||
{
|
||||
return Float32(a) < Float32(b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator>(const BFloat16 & a, const T & b)
|
||||
{
|
||||
return Float32(a) > b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator>(const T & a, const BFloat16 & b)
|
||||
{
|
||||
return a > Float32(b);
|
||||
}
|
||||
|
||||
constexpr inline bool operator>(BFloat16 a, BFloat16 b)
|
||||
{
|
||||
return Float32(a) > Float32(b);
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator<=(const BFloat16 & a, const T & b)
|
||||
{
|
||||
return Float32(a) <= b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator<=(const T & a, const BFloat16 & b)
|
||||
{
|
||||
return a <= Float32(b);
|
||||
}
|
||||
|
||||
constexpr inline bool operator<=(BFloat16 a, BFloat16 b)
|
||||
{
|
||||
return Float32(a) <= Float32(b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator>=(const BFloat16 & a, const T & b)
|
||||
{
|
||||
return Float32(a) >= b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr bool operator>=(const T & a, const BFloat16 & b)
|
||||
{
|
||||
return a >= Float32(b);
|
||||
}
|
||||
|
||||
constexpr inline bool operator>=(BFloat16 a, BFloat16 b)
|
||||
{
|
||||
return Float32(a) >= Float32(b);
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr inline auto operator+(T a, BFloat16 b)
|
||||
{
|
||||
return a + Float32(b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr inline auto operator+(BFloat16 a, T b)
|
||||
{
|
||||
return Float32(a) + b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr inline auto operator-(T a, BFloat16 b)
|
||||
{
|
||||
return a - Float32(b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr inline auto operator-(BFloat16 a, T b)
|
||||
{
|
||||
return Float32(a) - b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr inline auto operator*(T a, BFloat16 b)
|
||||
{
|
||||
return a * Float32(b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr inline auto operator*(BFloat16 a, T b)
|
||||
{
|
||||
return Float32(a) * b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr inline auto operator/(T a, BFloat16 b)
|
||||
{
|
||||
return a / Float32(b);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
requires(!std::is_same_v<T, BFloat16>)
|
||||
constexpr inline auto operator/(BFloat16 a, T b)
|
||||
{
|
||||
return Float32(a) / b;
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:${COVERAGE_FLAGS}>)
|
||||
add_compile_options("$<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:${COVERAGE_FLAGS}>")
|
||||
|
||||
if (USE_CLANG_TIDY)
|
||||
set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
|
||||
@ -8,6 +8,8 @@ endif ()
|
||||
# when instantiated from JSON.cpp. Try again when libcxx(abi) and Clang are upgraded to 16.
|
||||
set (CMAKE_CXX_STANDARD 20)
|
||||
|
||||
configure_file(GitHash.cpp.in GitHash.generated.cpp)
|
||||
|
||||
set (SRCS
|
||||
argsToConfig.cpp
|
||||
cgroupsv2.cpp
|
||||
@ -33,6 +35,7 @@ set (SRCS
|
||||
safeExit.cpp
|
||||
throwError.cpp
|
||||
Numa.cpp
|
||||
GitHash.generated.cpp
|
||||
)
|
||||
|
||||
add_library (common ${SRCS})
|
||||
|
@ -10,6 +10,15 @@
|
||||
|
||||
template <typename T> struct FloatTraits;
|
||||
|
||||
template <>
|
||||
struct FloatTraits<BFloat16>
|
||||
{
|
||||
using UInt = uint16_t;
|
||||
static constexpr size_t bits = 16;
|
||||
static constexpr size_t exponent_bits = 8;
|
||||
static constexpr size_t mantissa_bits = bits - exponent_bits - 1;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct FloatTraits<float>
|
||||
{
|
||||
@ -87,6 +96,15 @@ struct DecomposedFloat
|
||||
&& ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0));
|
||||
}
|
||||
|
||||
bool isFinite() const
|
||||
{
|
||||
return exponent() != ((1ull << Traits::exponent_bits) - 1);
|
||||
}
|
||||
|
||||
bool isNaN() const
|
||||
{
|
||||
return !isFinite() && (mantissa() != 0);
|
||||
}
|
||||
|
||||
/// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic.
|
||||
/// This function is generic, big integers (128, 256 bit) are supported as well.
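The hunk above introduces FloatTraits<BFloat16> and the isFinite()/isNaN() helpers; the checks follow the usual IEEE-style encoding rules (an all-ones exponent means Inf or NaN, and a non-zero mantissa then means NaN). A hypothetical illustration, not part of this patch, assuming the headers are reachable as <base/DecomposedFloat.h> and <base/BFloat16.h> and that BFloat16 is constructible from float:

#include <base/BFloat16.h>
#include <base/DecomposedFloat.h>
#include <cassert>
#include <cmath>

int main()
{
    // A quiet NaN truncated to bfloat16 keeps an all-ones exponent and a non-zero mantissa.
    DecomposedFloat16 not_a_number(BFloat16(std::nanf("")));
    DecomposedFloat16 one(BFloat16(1.0f));

    assert(not_a_number.isNaN());
    assert(one.isFinite() && !one.isNaN());
    return 0;
}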
|
||||
@ -110,8 +128,7 @@ struct DecomposedFloat
|
||||
{
|
||||
if (!isNegative())
|
||||
return rhs > 0 ? -1 : 1;
|
||||
else
|
||||
return rhs >= 0 ? -1 : 1;
|
||||
return rhs >= 0 ? -1 : 1;
|
||||
}
|
||||
|
||||
/// The case of the most negative integer
|
||||
@ -128,8 +145,7 @@ struct DecomposedFloat
|
||||
|
||||
if (mantissa() == 0)
|
||||
return 0;
|
||||
else
|
||||
return -1;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
@ -169,9 +185,8 @@ struct DecomposedFloat
|
||||
/// Float has no fractional part means that the numbers are equal.
|
||||
if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0)
|
||||
return 0;
|
||||
else
|
||||
/// Float has fractional part means its abs value is larger.
|
||||
return isNegative() ? -1 : 1;
|
||||
/// Float has fractional part means its abs value is larger.
|
||||
return isNegative() ? -1 : 1;
|
||||
}
|
||||
|
||||
|
||||
@ -215,3 +230,4 @@ struct DecomposedFloat
|
||||
|
||||
using DecomposedFloat64 = DecomposedFloat<double>;
|
||||
using DecomposedFloat32 = DecomposedFloat<float>;
|
||||
using DecomposedFloat16 = DecomposedFloat<BFloat16>;
|
||||
|
@ -4,7 +4,7 @@
|
||||
#include <fmt/format.h>
|
||||
|
||||
|
||||
template <class T> concept is_enum = std::is_enum_v<T>;
|
||||
template <typename T> concept is_enum = std::is_enum_v<T>;
|
||||
|
||||
namespace detail
|
||||
{
|
||||
|
@ -205,8 +205,7 @@ JSON::ElementType JSON::getType() const
|
||||
Pos after_string = skipString();
|
||||
if (after_string < ptr_end && *after_string == ':')
|
||||
return TYPE_NAME_VALUE_PAIR;
|
||||
else
|
||||
return TYPE_STRING;
|
||||
return TYPE_STRING;
|
||||
}
|
||||
default:
|
||||
throw JSONException(std::string("JSON: unexpected char ") + *ptr_begin + ", expected one of '{[tfn-0123456789\"'");
|
||||
@ -474,8 +473,7 @@ JSON::Pos JSON::searchField(const char * data, size_t size) const
|
||||
|
||||
if (it == end())
|
||||
return nullptr;
|
||||
else
|
||||
return it->data();
|
||||
return it->data();
|
||||
}
|
||||
|
||||
|
||||
@ -487,7 +485,7 @@ bool JSON::hasEscapes() const
|
||||
|
||||
if (*pos == '"')
|
||||
return false;
|
||||
else if (*pos == '\\')
|
||||
if (*pos == '\\')
|
||||
return true;
|
||||
throw JSONException("JSON: unexpected end of data.");
|
||||
}
|
||||
@ -503,7 +501,7 @@ bool JSON::hasSpecialChars() const
|
||||
|
||||
if (*pos == '"')
|
||||
return false;
|
||||
else if (pos < ptr_end)
|
||||
if (pos < ptr_end)
|
||||
return true;
|
||||
throw JSONException("JSON: unexpected end of data.");
|
||||
}
|
||||
@ -682,10 +680,9 @@ double JSON::toDouble() const
|
||||
|
||||
if (type == TYPE_NUMBER)
|
||||
return getDouble();
|
||||
else if (type == TYPE_STRING)
|
||||
if (type == TYPE_STRING)
|
||||
return JSON(ptr_begin + 1, ptr_end, level + 1).getDouble();
|
||||
else
|
||||
throw JSONException("JSON: cannot convert value to double.");
|
||||
throw JSONException("JSON: cannot convert value to double.");
|
||||
}
|
||||
|
||||
Int64 JSON::toInt() const
|
||||
@ -694,10 +691,9 @@ Int64 JSON::toInt() const
|
||||
|
||||
if (type == TYPE_NUMBER)
|
||||
return getInt();
|
||||
else if (type == TYPE_STRING)
|
||||
if (type == TYPE_STRING)
|
||||
return JSON(ptr_begin + 1, ptr_end, level + 1).getInt();
|
||||
else
|
||||
throw JSONException("JSON: cannot convert value to signed integer.");
|
||||
throw JSONException("JSON: cannot convert value to signed integer.");
|
||||
}
|
||||
|
||||
UInt64 JSON::toUInt() const
|
||||
@ -706,10 +702,9 @@ UInt64 JSON::toUInt() const
|
||||
|
||||
if (type == TYPE_NUMBER)
|
||||
return getUInt();
|
||||
else if (type == TYPE_STRING)
|
||||
if (type == TYPE_STRING)
|
||||
return JSON(ptr_begin + 1, ptr_end, level + 1).getUInt();
|
||||
else
|
||||
throw JSONException("JSON: cannot convert value to unsigned integer.");
|
||||
throw JSONException("JSON: cannot convert value to unsigned integer.");
|
||||
}
|
||||
|
||||
std::string JSON::toString() const
|
||||
@ -718,11 +713,9 @@ std::string JSON::toString() const
|
||||
|
||||
if (type == TYPE_STRING)
|
||||
return getString();
|
||||
else
|
||||
{
|
||||
Pos pos = skipElement();
|
||||
return std::string(ptr_begin, pos - ptr_begin);
|
||||
}
|
||||
|
||||
Pos pos = skipElement();
|
||||
return std::string(ptr_begin, pos - ptr_begin);
|
||||
}
|
||||
|
||||
|
||||
|
@ -203,9 +203,7 @@ T JSON::getWithDefault(const std::string & key, const T & default_) const
|
||||
|
||||
if (key_json.isType<T>())
|
||||
return key_json.get<T>();
|
||||
else
|
||||
return default_;
|
||||
}
|
||||
else
|
||||
return default_;
|
||||
}
|
||||
return default_;
|
||||
}
|
||||
|
@ -86,7 +86,7 @@ using StringRefs = std::vector<StringRef>;
|
||||
* For more information, see hash_map_string_2.cpp
|
||||
*/
|
||||
|
||||
inline bool compare8(const char * p1, const char * p2)
|
||||
inline bool compare16(const char * p1, const char * p2)
|
||||
{
|
||||
return 0xFFFF == _mm_movemask_epi8(_mm_cmpeq_epi8(
|
||||
_mm_loadu_si128(reinterpret_cast<const __m128i *>(p1)),
|
||||
@ -115,7 +115,7 @@ inline bool compare64(const char * p1, const char * p2)
|
||||
|
||||
#elif defined(__aarch64__) && defined(__ARM_NEON)
|
||||
|
||||
inline bool compare8(const char * p1, const char * p2)
|
||||
inline bool compare16(const char * p1, const char * p2)
|
||||
{
|
||||
uint64_t mask = getNibbleMask(vceqq_u8(
|
||||
vld1q_u8(reinterpret_cast<const unsigned char *>(p1)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2))));
|
||||
@ -151,19 +151,19 @@ inline bool memequalWide(const char * p1, const char * p2, size_t size)
|
||||
return unalignedLoad<uint64_t>(p1) == unalignedLoad<uint64_t>(p2)
|
||||
&& unalignedLoad<uint64_t>(p1 + size - 8) == unalignedLoad<uint64_t>(p2 + size - 8);
|
||||
}
|
||||
else if (size >= 4)
|
||||
if (size >= 4)
|
||||
{
|
||||
/// Chunks of 4..7 bytes.
|
||||
return unalignedLoad<uint32_t>(p1) == unalignedLoad<uint32_t>(p2)
|
||||
&& unalignedLoad<uint32_t>(p1 + size - 4) == unalignedLoad<uint32_t>(p2 + size - 4);
|
||||
}
|
||||
else if (size >= 2)
|
||||
if (size >= 2)
|
||||
{
|
||||
/// Chunks of 2..3 bytes.
|
||||
return unalignedLoad<uint16_t>(p1) == unalignedLoad<uint16_t>(p2)
|
||||
&& unalignedLoad<uint16_t>(p1 + size - 2) == unalignedLoad<uint16_t>(p2 + size - 2);
|
||||
}
|
||||
else if (size >= 1)
|
||||
if (size >= 1)
|
||||
{
|
||||
/// A single byte.
|
||||
return *p1 == *p2;
|
||||
@ -185,13 +185,22 @@ inline bool memequalWide(const char * p1, const char * p2, size_t size)
|
||||
|
||||
switch (size / 16) // NOLINT(bugprone-switch-missing-default-case)
|
||||
{
|
||||
case 3: if (!compare8(p1 + 32, p2 + 32)) return false; [[fallthrough]];
|
||||
case 2: if (!compare8(p1 + 16, p2 + 16)) return false; [[fallthrough]];
|
||||
case 1: if (!compare8(p1, p2)) return false; [[fallthrough]];
|
||||
case 3:
|
||||
if (!compare16(p1 + 32, p2 + 32))
|
||||
return false;
|
||||
[[fallthrough]];
|
||||
case 2:
|
||||
if (!compare16(p1 + 16, p2 + 16))
|
||||
return false;
|
||||
[[fallthrough]];
|
||||
case 1:
|
||||
if (!compare16(p1, p2))
|
||||
return false;
|
||||
[[fallthrough]];
|
||||
default: ;
|
||||
}
|
||||
|
||||
return compare8(p1 + size - 16, p2 + size - 16);
|
||||
return compare16(p1 + size - 16, p2 + size - 16);
|
||||
}
|
||||
|
||||
#endif
|
||||
@ -369,11 +378,15 @@ namespace PackedZeroTraits
|
||||
{
|
||||
template <typename Second, template <typename, typename> class PackedPairNoInit>
|
||||
inline bool check(const PackedPairNoInit<StringRef, Second> p)
|
||||
{ return 0 == p.key.size; }
|
||||
{
|
||||
return 0 == p.key.size;
|
||||
}
|
||||
|
||||
template <typename Second, template <typename, typename> class PackedPairNoInit>
|
||||
inline void set(PackedPairNoInit<StringRef, Second> & p)
|
||||
{ p.key.size = 0; }
|
||||
{
|
||||
p.key.size = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -9,10 +9,11 @@ namespace DB
|
||||
{
|
||||
|
||||
using TypeListNativeInt = TypeList<UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64>;
|
||||
using TypeListFloat = TypeList<Float32, Float64>;
|
||||
using TypeListNativeNumber = TypeListConcat<TypeListNativeInt, TypeListFloat>;
|
||||
using TypeListNativeFloat = TypeList<Float32, Float64>;
|
||||
using TypeListNativeNumber = TypeListConcat<TypeListNativeInt, TypeListNativeFloat>;
|
||||
using TypeListWideInt = TypeList<UInt128, Int128, UInt256, Int256>;
|
||||
using TypeListInt = TypeListConcat<TypeListNativeInt, TypeListWideInt>;
|
||||
using TypeListFloat = TypeListConcat<TypeListNativeFloat, TypeList<BFloat16>>;
|
||||
using TypeListIntAndFloat = TypeListConcat<TypeListInt, TypeListFloat>;
|
||||
using TypeListDecimal = TypeList<Decimal32, Decimal64, Decimal128, Decimal256>;
|
||||
using TypeListNumber = TypeListConcat<TypeListIntAndFloat, TypeListDecimal>;
|
||||
|
@ -32,6 +32,7 @@ TN_MAP(Int32)
|
||||
TN_MAP(Int64)
|
||||
TN_MAP(Int128)
|
||||
TN_MAP(Int256)
|
||||
TN_MAP(BFloat16)
|
||||
TN_MAP(Float32)
|
||||
TN_MAP(Float64)
|
||||
TN_MAP(String)
|
||||
|
@ -53,10 +53,9 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv,
|
||||
key = arg.substr(key_start);
|
||||
continue;
|
||||
}
|
||||
else
|
||||
{
|
||||
key = "";
|
||||
}
|
||||
|
||||
key = "";
|
||||
|
||||
|
||||
if (key_start == std::string::npos)
|
||||
continue;
|
||||
|
@ -27,27 +27,6 @@ bool cgroupsV2Enabled()
|
||||
#endif
|
||||
}
|
||||
|
||||
bool cgroupsV2MemoryControllerEnabled()
|
||||
{
|
||||
#if defined(OS_LINUX)
|
||||
chassert(cgroupsV2Enabled());
|
||||
/// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
|
||||
/// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
|
||||
/// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
|
||||
fs::path cgroup_dir = cgroupV2PathOfProcess();
|
||||
if (cgroup_dir.empty())
|
||||
return false;
|
||||
std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
|
||||
if (!controllers_file.is_open())
|
||||
return false;
|
||||
std::string controllers;
|
||||
std::getline(controllers_file, controllers);
|
||||
return controllers.find("memory") != std::string::npos;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
fs::path cgroupV2PathOfProcess()
|
||||
{
|
||||
#if defined(OS_LINUX)
|
||||
@ -71,3 +50,28 @@ fs::path cgroupV2PathOfProcess()
|
||||
return {};
|
||||
#endif
|
||||
}
|
||||
|
||||
std::optional<std::string> getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name)
|
||||
{
|
||||
#if defined(OS_LINUX)
|
||||
if (!cgroupsV2Enabled())
|
||||
return {};
|
||||
|
||||
fs::path current_cgroup = cgroupV2PathOfProcess();
|
||||
if (current_cgroup.empty())
|
||||
return {};
|
||||
|
||||
/// Return the bottom-most nested file. If there is no such file at the current
|
||||
/// level, try again at the parent level as settings are inherited.
|
||||
while (current_cgroup != default_cgroups_mount.parent_path())
|
||||
{
|
||||
const auto path = current_cgroup / file_name;
|
||||
if (fs::exists(path))
|
||||
return {current_cgroup};
|
||||
current_cgroup = current_cgroup.parent_path();
|
||||
}
|
||||
return {};
|
||||
#else
|
||||
return {};
|
||||
#endif
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <filesystem>
|
||||
#include <string_view>
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
|
||||
@ -11,11 +12,11 @@ static inline const std::filesystem::path default_cgroups_mount = "/sys/fs/cgrou
|
||||
/// Is cgroups v2 enabled on the system?
|
||||
bool cgroupsV2Enabled();
|
||||
|
||||
/// Is the memory controller of cgroups v2 enabled on the system?
|
||||
/// Assumes that cgroupsV2Enabled() is enabled.
|
||||
bool cgroupsV2MemoryControllerEnabled();
|
||||
|
||||
/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
|
||||
/// Returns an empty path if the cgroup cannot be determined.
|
||||
/// Assumes that cgroupsV2Enabled() is enabled.
|
||||
std::filesystem::path cgroupV2PathOfProcess();
|
||||
|
||||
/// Returns the most nested cgroup dir containing the specified file.
|
||||
/// If cgroups v2 is not enabled - returns an empty optional.
|
||||
std::optional<std::string> getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name);
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
#include <cctz/time_zone.h>
|
||||
#include <fmt/core.h>
|
||||
|
||||
|
||||
inline std::string to_string(const std::time_t & time)
|
||||
@ -11,18 +12,6 @@ inline std::string to_string(const std::time_t & time)
|
||||
return cctz::format("%Y-%m-%d %H:%M:%S", std::chrono::system_clock::from_time_t(time), cctz::local_time_zone());
|
||||
}
|
||||
|
||||
template <typename Clock, typename Duration = typename Clock::duration>
|
||||
std::string to_string(const std::chrono::time_point<Clock, Duration> & tp)
|
||||
{
|
||||
// Don't use DateLUT because it shows weird characters for
|
||||
// TimePoint::max(). I wish we could use C++20 format, but it's not
|
||||
// there yet.
|
||||
// return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp));
|
||||
|
||||
auto in_time_t = std::chrono::system_clock::to_time_t(tp);
|
||||
return to_string(in_time_t);
|
||||
}
|
||||
|
||||
template <typename Rep, typename Period = std::ratio<1>>
|
||||
std::string to_string(const std::chrono::duration<Rep, Period> & duration)
|
||||
{
|
||||
@ -33,6 +22,20 @@ std::string to_string(const std::chrono::duration<Rep, Period> & duration)
|
||||
return std::to_string(seconds_as_double.count()) + "s";
|
||||
}
|
||||
|
||||
template <typename Clock, typename Duration = typename Clock::duration>
|
||||
std::string to_string(const std::chrono::time_point<Clock, Duration> & tp)
|
||||
{
|
||||
// Don't use DateLUT because it shows weird characters for
|
||||
// TimePoint::max(). I wish we could use C++20 format, but it's not
|
||||
// there yet.
|
||||
// return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp));
|
||||
|
||||
if constexpr (std::is_same_v<Clock, std::chrono::system_clock>)
|
||||
return to_string(std::chrono::system_clock::to_time_t(tp));
|
||||
else
|
||||
return to_string(tp.time_since_epoch());
|
||||
}
|
||||
|
||||
template <typename Clock, typename Duration = typename Clock::duration>
|
||||
std::ostream & operator<<(std::ostream & o, const std::chrono::time_point<Clock, Duration> & tp)
|
||||
{
|
||||
@ -44,3 +47,23 @@ std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Per
|
||||
{
|
||||
return o << to_string(duration);
|
||||
}
|
||||
|
||||
template <typename Clock, typename Duration>
|
||||
struct fmt::formatter<std::chrono::time_point<Clock, Duration>> : fmt::formatter<std::string>
|
||||
{
|
||||
template <typename FormatCtx>
|
||||
auto format(const std::chrono::time_point<Clock, Duration> & tp, FormatCtx & ctx) const
|
||||
{
|
||||
return fmt::formatter<std::string>::format(::to_string(tp), ctx);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Rep, typename Period>
|
||||
struct fmt::formatter<std::chrono::duration<Rep, Period>> : fmt::formatter<std::string>
|
||||
{
|
||||
template <typename FormatCtx>
|
||||
auto format(const std::chrono::duration<Rep, Period> & duration, FormatCtx & ctx) const
|
||||
{
|
||||
return fmt::formatter<std::string>::format(::to_string(duration), ctx);
|
||||
}
|
||||
};
|
||||
|
@ -145,6 +145,7 @@
|
||||
#define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
|
||||
#define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
|
||||
#define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability
|
||||
#define TSA_RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__))) /// to return capabilities in functions
|
||||
|
||||
/// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
|
||||
/// They use a lambda function to apply function attribute to a single statement. This enable us to suppress warnings locally instead of
|
||||
|
@ -4,6 +4,8 @@
|
||||
|
||||
#include <base/types.h>
|
||||
#include <base/wide_integer.h>
|
||||
#include <base/BFloat16.h>
|
||||
|
||||
|
||||
using Int128 = wide::integer<128, signed>;
|
||||
using UInt128 = wide::integer<128, unsigned>;
|
||||
@ -24,6 +26,7 @@ struct is_signed // NOLINT(readability-identifier-naming)
|
||||
|
||||
template <> struct is_signed<Int128> { static constexpr bool value = true; };
|
||||
template <> struct is_signed<Int256> { static constexpr bool value = true; };
|
||||
template <> struct is_signed<BFloat16> { static constexpr bool value = true; };
|
||||
|
||||
template <typename T>
|
||||
inline constexpr bool is_signed_v = is_signed<T>::value;
|
||||
@ -40,15 +43,13 @@ template <> struct is_unsigned<UInt256> { static constexpr bool value = true; };
|
||||
template <typename T>
|
||||
inline constexpr bool is_unsigned_v = is_unsigned<T>::value;
|
||||
|
||||
template <class T> concept is_integer =
|
||||
template <typename T> concept is_integer =
|
||||
std::is_integral_v<T>
|
||||
|| std::is_same_v<T, Int128>
|
||||
|| std::is_same_v<T, UInt128>
|
||||
|| std::is_same_v<T, Int256>
|
||||
|| std::is_same_v<T, UInt256>;
|
||||
|
||||
template <class T> concept is_floating_point = std::is_floating_point_v<T>;
|
||||
|
||||
template <typename T>
|
||||
struct is_arithmetic // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
@ -59,11 +60,16 @@ template <> struct is_arithmetic<Int128> { static constexpr bool value = true; }
|
||||
template <> struct is_arithmetic<UInt128> { static constexpr bool value = true; };
|
||||
template <> struct is_arithmetic<Int256> { static constexpr bool value = true; };
|
||||
template <> struct is_arithmetic<UInt256> { static constexpr bool value = true; };
|
||||
|
||||
template <> struct is_arithmetic<BFloat16> { static constexpr bool value = true; };
|
||||
|
||||
template <typename T>
|
||||
inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
|
||||
|
||||
template <typename T> concept is_floating_point =
|
||||
std::is_floating_point_v<T>
|
||||
|| std::is_same_v<T, BFloat16>;
|
||||
|
||||
|
||||
#define FOR_EACH_ARITHMETIC_TYPE(M) \
|
||||
M(DataTypeDate) \
|
||||
M(DataTypeDate32) \
|
||||
@ -80,6 +86,7 @@ inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
|
||||
M(DataTypeUInt128) \
|
||||
M(DataTypeInt256) \
|
||||
M(DataTypeUInt256) \
|
||||
M(DataTypeBFloat16) \
|
||||
M(DataTypeFloat32) \
|
||||
M(DataTypeFloat64)
|
||||
|
||||
@ -99,6 +106,7 @@ inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
|
||||
M(DataTypeUInt128, X) \
|
||||
M(DataTypeInt256, X) \
|
||||
M(DataTypeUInt256, X) \
|
||||
M(DataTypeBFloat16, X) \
|
||||
M(DataTypeFloat32, X) \
|
||||
M(DataTypeFloat64, X)
|
||||
|
||||
|
@ -330,9 +330,8 @@ inline const char * find_first_symbols_dispatch(const char * begin, const char *
|
||||
#if defined(__SSE4_2__)
|
||||
if (sizeof...(symbols) >= 5)
|
||||
return find_first_symbols_sse42<positive, return_mode, sizeof...(symbols), symbols...>(begin, end);
|
||||
else
|
||||
#endif
|
||||
return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
|
||||
return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
|
||||
}
|
||||
|
||||
template <bool positive, ReturnMode return_mode>
|
||||
@ -341,9 +340,8 @@ inline const char * find_first_symbols_dispatch(const std::string_view haystack,
|
||||
#if defined(__SSE4_2__)
|
||||
if (symbols.str.size() >= 5)
|
||||
return find_first_symbols_sse42<positive, return_mode>(haystack.begin(), haystack.end(), symbols);
|
||||
else
|
||||
#endif
|
||||
return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
|
||||
return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -19,9 +19,6 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
|
||||
if (!cgroupsV2Enabled())
|
||||
return {};
|
||||
|
||||
if (!cgroupsV2MemoryControllerEnabled())
|
||||
return {};
|
||||
|
||||
std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
|
||||
if (current_cgroup.empty())
|
||||
return {};
|
||||
@ -36,8 +33,7 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
|
||||
uint64_t value;
|
||||
if (setting_file >> value)
|
||||
return {value};
|
||||
else
|
||||
return {}; /// e.g. the cgroups default "max"
|
||||
return {}; /// e.g. the cgroups default "max"
|
||||
}
|
||||
current_cgroup = current_cgroup.parent_path();
|
||||
}
|
||||
|
@ -11,6 +11,9 @@ if (GLIBC_COMPATIBILITY)
|
||||
if (ARCH_AARCH64)
|
||||
list (APPEND glibc_compatibility_sources musl/aarch64/syscall.s musl/aarch64/longjmp.s)
|
||||
set (musl_arch_include_dir musl/aarch64)
|
||||
# Disable getauxval in aarch64. ARM glibc minimum requirement for the project is 2.18 and getauxval is present
|
||||
# in 2.16. Having a custom one introduces issues with sanitizers
|
||||
list (REMOVE_ITEM glibc_compatibility_sources musl/getauxval.c)
|
||||
elseif (ARCH_AMD64)
|
||||
list (APPEND glibc_compatibility_sources musl/x86_64/syscall.s musl/x86_64/longjmp.s)
|
||||
set (musl_arch_include_dir musl/x86_64)
|
||||
@ -18,7 +21,7 @@ if (GLIBC_COMPATIBILITY)
|
||||
message (FATAL_ERROR "glibc_compatibility can only be used on x86_64 or aarch64.")
|
||||
endif ()
|
||||
|
||||
if (SANITIZE STREQUAL thread)
|
||||
if (SANITIZE STREQUAL thread AND ARCH_AMD64)
|
||||
# Disable TSAN instrumentation that conflicts with re-exec due to high ASLR entropy using getauxval
|
||||
# See longer comment in __auxv_init_procfs
|
||||
# In the case of tsan we need to make sure getauxval is not instrumented as that would introduce tsan
|
||||
|
@ -25,9 +25,10 @@
|
||||
// We don't have libc struct available here.
|
||||
// Compute aux vector manually (from /proc/self/auxv).
|
||||
//
|
||||
// Right now there is only 51 AT_* constants,
|
||||
// so 64 should be enough until this implementation will be replaced with musl.
|
||||
static unsigned long __auxv_procfs[64];
|
||||
// Right now there are 51 AT_* constants. Custom kernels have been encountered
|
||||
// making use of up to 71. 128 should be enough until this implementation is
|
||||
// replaced with musl.
|
||||
static unsigned long __auxv_procfs[128];
|
||||
static unsigned long __auxv_secure = 0;
|
||||
// Common
|
||||
static unsigned long * __auxv_environ = NULL;
|
||||
|
@ -66,13 +66,11 @@ TRAP(gethostbyname)
|
||||
TRAP(gethostbyname2)
|
||||
TRAP(gethostent)
|
||||
TRAP(getlogin)
|
||||
TRAP(getmntent)
|
||||
TRAP(getnetbyaddr)
|
||||
TRAP(getnetbyname)
|
||||
TRAP(getnetent)
|
||||
TRAP(getnetgrent)
|
||||
TRAP(getnetgrent_r)
|
||||
TRAP(getopt)
|
||||
TRAP(getopt_long)
|
||||
TRAP(getopt_long_only)
|
||||
TRAP(getpass)
|
||||
@ -133,7 +131,6 @@ TRAP(nrand48)
|
||||
TRAP(__ppc_get_timebase_freq)
|
||||
TRAP(ptsname)
|
||||
TRAP(putchar_unlocked)
|
||||
TRAP(putenv)
|
||||
TRAP(pututline)
|
||||
TRAP(pututxline)
|
||||
TRAP(putwchar_unlocked)
|
||||
@ -148,7 +145,6 @@ TRAP(sethostent)
|
||||
TRAP(sethostid)
|
||||
TRAP(setkey)
|
||||
//TRAP(setlocale) // Used by replxx at startup
|
||||
TRAP(setlogmask)
|
||||
TRAP(setnetent)
|
||||
TRAP(setnetgrent)
|
||||
TRAP(setprotoent)
|
||||
@ -203,7 +199,6 @@ TRAP(lgammal)
|
||||
TRAP(nftw)
|
||||
TRAP(nl_langinfo)
|
||||
TRAP(putc_unlocked)
|
||||
TRAP(rand)
|
||||
/** In the current POSIX.1 specification (POSIX.1-2008), readdir() is not required to be thread-safe. However, in modern
|
||||
* implementations (including the glibc implementation), concurrent calls to readdir() that specify different directory streams
|
||||
* are thread-safe. In cases where multiple threads must read from the same directory stream, using readdir() with external
|
||||
@ -288,4 +283,14 @@ TRAP(tss_get)
|
||||
TRAP(tss_set)
|
||||
TRAP(tss_delete)
|
||||
|
||||
#ifndef USE_MUSL
|
||||
/// These produce duplicate symbol errors when statically linking with musl.
|
||||
/// Maybe we can remove them from the musl fork.
|
||||
TRAP(getopt)
|
||||
TRAP(putenv)
|
||||
TRAP(setlogmask)
|
||||
TRAP(rand)
|
||||
TRAP(getmntent)
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -3,7 +3,11 @@ add_subdirectory (Data)
|
||||
add_subdirectory (Data/ODBC)
|
||||
add_subdirectory (Foundation)
|
||||
add_subdirectory (JSON)
|
||||
add_subdirectory (MongoDB)
|
||||
|
||||
if (USE_MONGODB)
|
||||
add_subdirectory(MongoDB)
|
||||
endif()
|
||||
|
||||
add_subdirectory (Net)
|
||||
add_subdirectory (NetSSL_OpenSSL)
|
||||
add_subdirectory (Redis)
|
||||
|
@ -188,8 +188,9 @@ namespace Crypto
|
||||
pFile = fopen(keyFile.c_str(), "r");
|
||||
if (pFile)
|
||||
{
|
||||
pem_password_cb * pCB = pass.empty() ? (pem_password_cb *)0 : &passCB;
|
||||
void * pPassword = pass.empty() ? (void *)0 : (void *)pass.c_str();
|
||||
pem_password_cb * pCB = &passCB;
|
||||
static constexpr char * no_password = "";
|
||||
void * pPassword = pass.empty() ? (void *)no_password : (void *)pass.c_str();
|
||||
if (readFunc(pFile, &pKey, pCB, pPassword))
|
||||
{
|
||||
fclose(pFile);
|
||||
@ -225,6 +226,13 @@ namespace Crypto
|
||||
error:
|
||||
if (pFile)
|
||||
fclose(pFile);
|
||||
if (*ppKey)
|
||||
{
|
||||
if constexpr (std::is_same_v<K, EVP_PKEY>)
|
||||
EVP_PKEY_free(*ppKey);
|
||||
else
|
||||
EC_KEY_free(*ppKey);
|
||||
}
|
||||
throw OpenSSLException("EVPKey::loadKey(string)");
|
||||
}
|
||||
|
||||
@ -286,6 +294,13 @@ namespace Crypto
|
||||
error:
|
||||
if (pBIO)
|
||||
BIO_free(pBIO);
|
||||
if (*ppKey)
|
||||
{
|
||||
if constexpr (std::is_same_v<K, EVP_PKEY>)
|
||||
EVP_PKEY_free(*ppKey);
|
||||
else
|
||||
EC_KEY_free(*ppKey);
|
||||
}
|
||||
throw OpenSSLException("EVPKey::loadKey(stream)");
|
||||
}
|
||||
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "Poco/Exception.h"
|
||||
#include "Poco/Foundation.h"
|
||||
#include "Poco/Mutex.h"
|
||||
#include "Poco/Message.h"
|
||||
|
||||
|
||||
namespace Poco
|
||||
@ -78,6 +79,10 @@ public:
|
||||
///
|
||||
/// The default implementation just breaks into the debugger.
|
||||
|
||||
virtual void logMessageImpl(Message::Priority priority, const std::string & msg) {}
|
||||
/// Write a message to the log
|
||||
/// Useful for logging from Poco
|
||||
|
||||
static void handle(const Exception & exc);
|
||||
/// Invokes the currently registered ErrorHandler.
|
||||
|
||||
@ -87,6 +92,9 @@ public:
|
||||
static void handle();
|
||||
/// Invokes the currently registered ErrorHandler.
|
||||
|
||||
static void logMessage(Message::Priority priority, const std::string & msg);
|
||||
/// Invokes the currently registered ErrorHandler to log a message.
|
||||
|
||||
static ErrorHandler * set(ErrorHandler * pHandler);
|
||||
/// Registers the given handler as the current error handler.
|
||||
///
|
||||
|
@ -952,6 +952,8 @@ private:
|
||||
static std::pair<LoggerMapIterator, bool> add(Logger * pLogger);
|
||||
static std::optional<LoggerMapIterator> find(const std::string & name);
|
||||
static Logger * findRawPtr(const std::string & name);
|
||||
void unsafeSetChannel(Channel * pChannel);
|
||||
Channel* unsafeGetChannel() const;
|
||||
|
||||
Logger();
|
||||
Logger(const Logger &);
|
||||
|
@ -8,7 +8,7 @@
|
||||
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
|
||||
// and Contributors.
|
||||
//
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
//
|
||||
|
||||
|
||||
@ -35,79 +35,91 @@ ErrorHandler::~ErrorHandler()
|
||||
|
||||
void ErrorHandler::exception(const Exception& exc)
|
||||
{
|
||||
poco_debugger_msg(exc.what());
|
||||
poco_debugger_msg(exc.what());
|
||||
}
|
||||
|
||||
|
||||
|
||||
void ErrorHandler::exception(const std::exception& exc)
|
||||
{
|
||||
poco_debugger_msg(exc.what());
|
||||
poco_debugger_msg(exc.what());
|
||||
}
|
||||
|
||||
|
||||
void ErrorHandler::exception()
|
||||
{
|
||||
poco_debugger_msg("unknown exception");
|
||||
poco_debugger_msg("unknown exception");
|
||||
}
|
||||
|
||||
|
||||
void ErrorHandler::handle(const Exception& exc)
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception(exc);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception(exc);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
void ErrorHandler::handle(const std::exception& exc)
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception(exc);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception(exc);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void ErrorHandler::handle()
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->exception();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
void ErrorHandler::logMessage(Message::Priority priority, const std::string & msg)
|
||||
{
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
try
|
||||
{
|
||||
_pHandler->logMessageImpl(priority, msg);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
ErrorHandler* ErrorHandler::set(ErrorHandler* pHandler)
|
||||
{
|
||||
poco_check_ptr(pHandler);
|
||||
poco_check_ptr(pHandler);
|
||||
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
ErrorHandler* pOld = _pHandler;
|
||||
_pHandler = pHandler;
|
||||
return pOld;
|
||||
FastMutex::ScopedLock lock(_mutex);
|
||||
ErrorHandler* pOld = _pHandler;
|
||||
_pHandler = pHandler;
|
||||
return pOld;
|
||||
}
|
||||
|
||||
|
||||
ErrorHandler* ErrorHandler::defaultHandler()
|
||||
{
|
||||
// NOTE: Since this is called to initialize the static _pHandler
|
||||
// variable, sh has to be a local static, otherwise we run
|
||||
// into static initialization order issues.
|
||||
static SingletonHolder<ErrorHandler> sh;
|
||||
return sh.get();
|
||||
// NOTE: Since this is called to initialize the static _pHandler
|
||||
// variable, sh has to be a local static, otherwise we run
|
||||
// into static initialization order issues.
|
||||
static SingletonHolder<ErrorHandler> sh;
|
||||
return sh.get();
|
||||
}
|
||||
|
||||
|
||||
|
@ -61,6 +61,13 @@ Logger::~Logger()
|
||||
|
||||
|
||||
void Logger::setChannel(Channel* pChannel)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(getLoggerMutex());
|
||||
unsafeSetChannel(pChannel);
|
||||
}
|
||||
|
||||
|
||||
void Logger::unsafeSetChannel(Channel* pChannel)
|
||||
{
|
||||
if (_pChannel) _pChannel->release();
|
||||
_pChannel = pChannel;
|
||||
@ -69,6 +76,14 @@ void Logger::setChannel(Channel* pChannel)
|
||||
|
||||
|
||||
Channel* Logger::getChannel() const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(getLoggerMutex());
|
||||
|
||||
return unsafeGetChannel();
|
||||
}
|
||||
|
||||
|
||||
Channel* Logger::unsafeGetChannel() const
|
||||
{
|
||||
return _pChannel;
|
||||
}
|
||||
@ -89,7 +104,7 @@ void Logger::setLevel(const std::string& level)
|
||||
void Logger::setProperty(const std::string& name, const std::string& value)
|
||||
{
|
||||
if (name == "channel")
|
||||
setChannel(LoggingRegistry::defaultRegistry().channelForName(value));
|
||||
unsafeSetChannel(LoggingRegistry::defaultRegistry().channelForName(value));
|
||||
else if (name == "level")
|
||||
setLevel(value);
|
||||
else
|
||||
@ -160,7 +175,7 @@ void Logger::setChannel(const std::string& name, Channel* pChannel)
|
||||
if (len == 0 ||
|
||||
(it.first.compare(0, len, name) == 0 && (it.first.length() == len || it.first[len] == '.')))
|
||||
{
|
||||
it.second.logger->setChannel(pChannel);
|
||||
it.second.logger->unsafeSetChannel(pChannel);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -393,7 +408,7 @@ std::pair<Logger::LoggerMapIterator, bool> Logger::unsafeGet(const std::string&
|
||||
else
|
||||
{
|
||||
Logger& par = parent(name);
|
||||
logger = new Logger(name, par.getChannel(), par.getLevel());
|
||||
logger = new Logger(name, par.unsafeGetChannel(), par.getLevel());
|
||||
}
|
||||
|
||||
return add(logger);
|
||||
|
@ -48,25 +48,17 @@ std::string PathImpl::currentImpl()
|
||||
std::string PathImpl::homeImpl()
|
||||
{
|
||||
std::string path;
|
||||
#if defined(_POSIX_C_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE)
|
||||
size_t buf_size = 1024; // Same as glibc use for getpwuid
|
||||
std::vector<char> buf(buf_size);
|
||||
struct passwd res;
|
||||
struct passwd* pwd = nullptr;
|
||||
|
||||
getpwuid_r(getuid(), &res, buf.data(), buf_size, &pwd);
|
||||
#else
|
||||
struct passwd* pwd = getpwuid(getuid());
|
||||
#endif
|
||||
if (pwd)
|
||||
path = pwd->pw_dir;
|
||||
else
|
||||
{
|
||||
#if defined(_POSIX_C_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE)
|
||||
getpwuid_r(getuid(), &res, buf.data(), buf_size, &pwd);
|
||||
#else
|
||||
pwd = getpwuid(geteuid());
|
||||
#endif
|
||||
if (pwd)
|
||||
path = pwd->pw_dir;
|
||||
else
|
||||
@ -82,7 +74,7 @@ std::string PathImpl::configHomeImpl()
|
||||
{
|
||||
std::string path = PathImpl::homeImpl();
|
||||
std::string::size_type n = path.size();
|
||||
if (n > 0 && path[n - 1] == '/')
|
||||
if (n > 0 && path[n - 1] == '/')
|
||||
#if POCO_OS == POCO_OS_MAC_OS_X
|
||||
path.append("Library/Preferences/");
|
||||
#else
|
||||
@ -97,7 +89,7 @@ std::string PathImpl::dataHomeImpl()
|
||||
{
|
||||
std::string path = PathImpl::homeImpl();
|
||||
std::string::size_type n = path.size();
|
||||
if (n > 0 && path[n - 1] == '/')
|
||||
if (n > 0 && path[n - 1] == '/')
|
||||
#if POCO_OS == POCO_OS_MAC_OS_X
|
||||
path.append("Library/Application Support/");
|
||||
#else
|
||||
@ -112,7 +104,7 @@ std::string PathImpl::cacheHomeImpl()
|
||||
{
|
||||
std::string path = PathImpl::homeImpl();
|
||||
std::string::size_type n = path.size();
|
||||
if (n > 0 && path[n - 1] == '/')
|
||||
if (n > 0 && path[n - 1] == '/')
|
||||
#if POCO_OS == POCO_OS_MAC_OS_X
|
||||
path.append("Library/Caches/");
|
||||
#else
|
||||
@ -127,7 +119,7 @@ std::string PathImpl::tempHomeImpl()
|
||||
{
|
||||
std::string path = PathImpl::homeImpl();
|
||||
std::string::size_type n = path.size();
|
||||
if (n > 0 && path[n - 1] == '/')
|
||||
if (n > 0 && path[n - 1] == '/')
|
||||
#if POCO_OS == POCO_OS_MAC_OS_X
|
||||
path.append("Library/Caches/");
|
||||
#else
|
||||
@ -159,7 +151,7 @@ std::string PathImpl::tempImpl()
|
||||
std::string PathImpl::configImpl()
|
||||
{
|
||||
std::string path;
|
||||
|
||||
|
||||
#if POCO_OS == POCO_OS_MAC_OS_X
|
||||
path = "/Library/Preferences/";
|
||||
#else
|
||||
|
@ -18,7 +18,9 @@
|
||||
#define Net_HTTPResponse_INCLUDED
|
||||
|
||||
|
||||
#include <map>
|
||||
#include <vector>
|
||||
|
||||
#include "Poco/Net/HTTPCookie.h"
|
||||
#include "Poco/Net/HTTPMessage.h"
|
||||
#include "Poco/Net/Net.h"
|
||||
@ -180,6 +182,8 @@ namespace Net
|
||||
/// May throw an exception in case of a malformed
|
||||
/// Set-Cookie header.
|
||||
|
||||
void getHeaders(std::map<std::string, std::string> & headers) const;
|
||||
|
||||
void write(std::ostream & ostr) const;
|
||||
/// Writes the HTTP response to the given
|
||||
/// output stream.
|
||||
|
@ -43,7 +43,7 @@ namespace Net
|
||||
/// Sets the following default values:
|
||||
/// - timeout: 60 seconds
|
||||
/// - keepAlive: true
|
||||
/// - maxKeepAliveRequests: 0
|
||||
/// - maxKeepAliveRequests: 100
|
||||
/// - keepAliveTimeout: 15 seconds
|
||||
|
||||
void setServerName(const std::string & serverName);
|
||||
@ -87,12 +87,12 @@ namespace Net
|
||||
const Poco::Timespan & getKeepAliveTimeout() const;
|
||||
/// Returns the connection timeout for HTTP connections.
|
||||
|
||||
void setMaxKeepAliveRequests(int maxKeepAliveRequests);
|
||||
void setMaxKeepAliveRequests(size_t maxKeepAliveRequests);
|
||||
/// Specifies the maximum number of requests allowed
|
||||
/// during a persistent connection. 0 means unlimited
|
||||
/// connections.
|
||||
|
||||
int getMaxKeepAliveRequests() const;
|
||||
size_t getMaxKeepAliveRequests() const;
|
||||
/// Returns the maximum number of requests allowed
|
||||
/// during a persistent connection, or 0 if
|
||||
/// unlimited connections are allowed.
|
||||
@ -106,7 +106,7 @@ namespace Net
|
||||
std::string _softwareVersion;
|
||||
Poco::Timespan _timeout;
|
||||
bool _keepAlive;
|
||||
int _maxKeepAliveRequests;
|
||||
size_t _maxKeepAliveRequests;
|
||||
Poco::Timespan _keepAliveTimeout;
|
||||
};
|
||||
|
||||
@ -138,7 +138,7 @@ namespace Net
|
||||
}
|
||||
|
||||
|
||||
inline int HTTPServerParams::getMaxKeepAliveRequests() const
|
||||
inline size_t HTTPServerParams::getMaxKeepAliveRequests() const
|
||||
{
|
||||
return _maxKeepAliveRequests;
|
||||
}
|
||||
|
@ -58,10 +58,14 @@ namespace Net
|
||||
|
||||
void setKeepAliveTimeout(Poco::Timespan keepAliveTimeout);
|
||||
|
||||
size_t getKeepAliveTimeout() const { return _keepAliveTimeout.totalSeconds(); }
|
||||
|
||||
size_t getMaxKeepAliveRequests() const { return _maxKeepAliveRequests; }
|
||||
|
||||
private:
|
||||
bool _firstRequest;
|
||||
Poco::Timespan _keepAliveTimeout;
|
||||
int _maxKeepAliveRequests;
|
||||
size_t _maxKeepAliveRequests;
|
||||
};
|
||||
|
||||
|
||||
@ -70,7 +74,7 @@ namespace Net
|
||||
//
|
||||
inline bool HTTPServerSession::canKeepAlive() const
|
||||
{
|
||||
return _maxKeepAliveRequests != 0;
|
||||
return getKeepAlive() && _maxKeepAliveRequests > 0;
|
||||
}
|
||||
|
||||
|
||||
|
@ -19,6 +19,8 @@
|
||||
|
||||
|
||||
#include <ios>
|
||||
#include <memory>
|
||||
#include <functional>
|
||||
#include "Poco/Any.h"
|
||||
#include "Poco/Buffer.h"
|
||||
#include "Poco/Exception.h"
|
||||
@ -33,6 +35,27 @@ namespace Net
|
||||
{
|
||||
|
||||
|
||||
class IHTTPSessionDataHooks
|
||||
/// Interface to control the stream of data bytes being sent or received through the socket by HTTPSession
|
||||
/// It allows monitoring, throttling and scheduling data streams with syscall granularity
|
||||
{
|
||||
public:
|
||||
virtual ~IHTTPSessionDataHooks() = default;
|
||||
|
||||
virtual void atStart(int bytes) = 0;
|
||||
/// Called before sending/receiving data `bytes` to/from socket.
|
||||
|
||||
virtual void atFinish(int bytes) = 0;
|
||||
/// Called when sending/receiving of data `bytes` is successfully finished.
|
||||
|
||||
virtual void atFail() = 0;
|
||||
/// If an error occurred during send/receive `fail()` is called instead of `finish()`.
|
||||
};
|
||||
|
||||
|
||||
using HTTPSessionDataHooksPtr = std::shared_ptr<IHTTPSessionDataHooks>;
|
||||
|
||||
|
||||
class Net_API HTTPSession
|
||||
/// HTTPSession implements basic HTTP session management
|
||||
/// for both HTTP clients and HTTP servers.
|
||||
@ -73,6 +96,12 @@ namespace Net
|
||||
Poco::Timespan getReceiveTimeout() const;
|
||||
/// Returns receive timeout for the HTTP session.
|
||||
|
||||
void setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks = {});
|
||||
/// Sets data hooks that will be called on every send to the socket.
|
||||
|
||||
void setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks = {});
|
||||
/// Sets data hooks that will be called on every receive from the socket.
|
||||
|
||||
bool connected() const;
|
||||
/// Returns true if the underlying socket is connected.
|
||||
|
||||
@ -211,6 +240,10 @@ namespace Net
|
||||
Poco::Exception * _pException;
|
||||
Poco::Any _data;
|
||||
|
||||
// Data hooks
|
||||
HTTPSessionDataHooksPtr _sendDataHooks;
|
||||
HTTPSessionDataHooksPtr _receiveDataHooks;
|
||||
|
||||
friend class HTTPStreamBuf;
|
||||
friend class HTTPHeaderStreamBuf;
|
||||
friend class HTTPFixedLengthStreamBuf;
|
||||
@ -246,6 +279,16 @@ namespace Net
|
||||
return _receiveTimeout;
|
||||
}
|
||||
|
||||
inline void HTTPSession::setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks)
|
||||
{
|
||||
_sendDataHooks = sendDataHooks;
|
||||
}
|
||||
|
||||
inline void HTTPSession::setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks)
|
||||
{
|
||||
_receiveDataHooks = receiveDataHooks;
|
||||
}
|
||||
|
||||
inline StreamSocket & HTTPSession::socket()
|
||||
{
|
||||
return _socket;
|
||||
|
@ -209,6 +209,15 @@ void HTTPResponse::getCookies(std::vector<HTTPCookie>& cookies) const
|
||||
}
|
||||
}
|
||||
|
||||
void HTTPResponse::getHeaders(std::map<std::string, std::string> & headers) const
|
||||
{
|
||||
headers.clear();
|
||||
for (const auto & it : *this)
|
||||
{
|
||||
headers.emplace(it.first, it.second);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void HTTPResponse::write(std::ostream& ostr) const
|
||||
{
|
||||
|
@ -22,7 +22,7 @@ namespace Net {
|
||||
HTTPServerParams::HTTPServerParams():
|
||||
_timeout(60000000),
|
||||
_keepAlive(true),
|
||||
_maxKeepAliveRequests(0),
|
||||
_maxKeepAliveRequests(100),
|
||||
_keepAliveTimeout(15000000)
|
||||
{
|
||||
}
|
||||
@ -32,12 +32,12 @@ HTTPServerParams::~HTTPServerParams()
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
|
||||
void HTTPServerParams::setServerName(const std::string& serverName)
|
||||
{
|
||||
_serverName = serverName;
|
||||
}
|
||||
|
||||
|
||||
|
||||
void HTTPServerParams::setSoftwareVersion(const std::string& softwareVersion)
|
||||
{
|
||||
@ -50,24 +50,24 @@ void HTTPServerParams::setTimeout(const Poco::Timespan& timeout)
|
||||
_timeout = timeout;
|
||||
}
|
||||
|
||||
|
||||
|
||||
void HTTPServerParams::setKeepAlive(bool keepAlive)
|
||||
{
|
||||
_keepAlive = keepAlive;
|
||||
}
|
||||
|
||||
|
||||
|
||||
void HTTPServerParams::setKeepAliveTimeout(const Poco::Timespan& timeout)
|
||||
{
|
||||
_keepAliveTimeout = timeout;
|
||||
}
|
||||
|
||||
|
||||
void HTTPServerParams::setMaxKeepAliveRequests(int maxKeepAliveRequests)
|
||||
|
||||
void HTTPServerParams::setMaxKeepAliveRequests(size_t maxKeepAliveRequests)
|
||||
{
|
||||
poco_assert (maxKeepAliveRequests >= 0);
|
||||
_maxKeepAliveRequests = maxKeepAliveRequests;
|
||||
}
|
||||
|
||||
|
||||
|
||||
} } // namespace Poco::Net
|
||||
|
@ -19,11 +19,11 @@ namespace Poco {
|
||||
namespace Net {
|
||||
|
||||
|
||||
HTTPServerSession::HTTPServerSession(const StreamSocket& socket, HTTPServerParams::Ptr pParams):
|
||||
HTTPSession(socket, pParams->getKeepAlive()),
|
||||
_firstRequest(true),
|
||||
_keepAliveTimeout(pParams->getKeepAliveTimeout()),
|
||||
_maxKeepAliveRequests(pParams->getMaxKeepAliveRequests())
|
||||
HTTPServerSession::HTTPServerSession(const StreamSocket & socket, HTTPServerParams::Ptr pParams)
|
||||
: HTTPSession(socket, pParams->getKeepAlive())
|
||||
, _firstRequest(true)
|
||||
, _keepAliveTimeout(pParams->getKeepAliveTimeout())
|
||||
, _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests())
|
||||
{
|
||||
setTimeout(pParams->getTimeout());
|
||||
}
|
||||
@ -50,13 +50,14 @@ bool HTTPServerSession::hasMoreRequests()
|
||||
--_maxKeepAliveRequests;
|
||||
return socket().poll(getTimeout(), Socket::SELECT_READ);
|
||||
}
|
||||
else if (_maxKeepAliveRequests != 0 && getKeepAlive())
|
||||
else if (canKeepAlive())
|
||||
{
|
||||
if (_maxKeepAliveRequests > 0)
|
||||
--_maxKeepAliveRequests;
|
||||
return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ);
|
||||
}
|
||||
else return false;
|
||||
if (_maxKeepAliveRequests > 0)
|
||||
--_maxKeepAliveRequests;
|
||||
return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ);
|
||||
}
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
|
@ -128,14 +128,14 @@ int HTTPSession::get()
|
||||
{
|
||||
if (_pCurrent == _pEnd)
|
||||
refill();
|
||||
|
||||
|
||||
if (_pCurrent < _pEnd)
|
||||
return *_pCurrent++;
|
||||
else
|
||||
return std::char_traits<char>::eof();
|
||||
}
|
||||
|
||||
|
||||
|
||||
int HTTPSession::peek()
|
||||
{
|
||||
if (_pCurrent == _pEnd)
|
||||
@ -147,7 +147,7 @@ int HTTPSession::peek()
|
||||
return std::char_traits<char>::eof();
|
||||
}
|
||||
|
||||
|
||||
|
||||
int HTTPSession::read(char* buffer, std::streamsize length)
|
||||
{
|
||||
if (_pCurrent < _pEnd)
|
||||
@ -166,10 +166,17 @@ int HTTPSession::write(const char* buffer, std::streamsize length)
|
||||
{
|
||||
try
|
||||
{
|
||||
return _socket.sendBytes(buffer, (int) length);
|
||||
if (_sendDataHooks)
|
||||
_sendDataHooks->atStart((int) length);
|
||||
int result = _socket.sendBytes(buffer, (int) length);
|
||||
if (_sendDataHooks)
|
||||
_sendDataHooks->atFinish(result);
|
||||
return result;
|
||||
}
|
||||
catch (Poco::Exception& exc)
|
||||
{
|
||||
if (_sendDataHooks)
|
||||
_sendDataHooks->atFail();
|
||||
setException(exc);
|
||||
throw;
|
||||
}
|
||||
@ -180,10 +187,17 @@ int HTTPSession::receive(char* buffer, int length)
|
||||
{
|
||||
try
|
||||
{
|
||||
return _socket.receiveBytes(buffer, length);
|
||||
if (_receiveDataHooks)
|
||||
_receiveDataHooks->atStart(length);
|
||||
int result = _socket.receiveBytes(buffer, length);
|
||||
if (_receiveDataHooks)
|
||||
_receiveDataHooks->atFinish(result);
|
||||
return result;
|
||||
}
|
||||
catch (Poco::Exception& exc)
|
||||
{
|
||||
if (_receiveDataHooks)
|
||||
_receiveDataHooks->atFail();
|
||||
setException(exc);
|
||||
throw;
|
||||
}
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include "Poco/Net/StreamSocketImpl.h"
|
||||
#include "Poco/NumberFormatter.h"
|
||||
#include "Poco/Timestamp.h"
|
||||
#include "Poco/ErrorHandler.h"
|
||||
#include <string.h> // FD_SET needs memset on some platforms, so we can't use <cstring>
|
||||
|
||||
|
||||
@ -62,7 +63,7 @@ bool checkIsBrokenTimeout()
|
||||
|
||||
SocketImpl::SocketImpl():
|
||||
_sockfd(POCO_INVALID_SOCKET),
|
||||
_blocking(true),
|
||||
_blocking(true),
|
||||
_isBrokenTimeout(checkIsBrokenTimeout())
|
||||
{
|
||||
}
|
||||
@ -81,7 +82,7 @@ SocketImpl::~SocketImpl()
|
||||
close();
|
||||
}
|
||||
|
||||
|
||||
|
||||
SocketImpl* SocketImpl::acceptConnection(SocketAddress& clientAddr)
|
||||
{
|
||||
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
|
||||
@ -117,7 +118,7 @@ void SocketImpl::connect(const SocketAddress& address)
|
||||
rc = ::connect(_sockfd, address.addr(), address.length());
|
||||
}
|
||||
while (rc != 0 && lastError() == POCO_EINTR);
|
||||
if (rc != 0)
|
||||
if (rc != 0)
|
||||
{
|
||||
int err = lastError();
|
||||
error(err, address.toString());
|
||||
@ -204,7 +205,7 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
|
||||
#if defined(POCO_HAVE_IPv6)
|
||||
if (address.family() != SocketAddress::IPv6)
|
||||
throw Poco::InvalidArgumentException("SocketAddress must be an IPv6 address");
|
||||
|
||||
|
||||
if (_sockfd == POCO_INVALID_SOCKET)
|
||||
{
|
||||
init(address.af());
|
||||
@ -225,11 +226,11 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
||||
void SocketImpl::listen(int backlog)
|
||||
{
|
||||
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
|
||||
|
||||
|
||||
int rc = ::listen(_sockfd, backlog);
|
||||
if (rc != 0) error();
|
||||
}
|
||||
@ -253,7 +254,7 @@ void SocketImpl::shutdownReceive()
|
||||
if (rc != 0) error();
|
||||
}
|
||||
|
||||
|
||||
|
||||
void SocketImpl::shutdownSend()
|
||||
{
|
||||
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
|
||||
@ -262,7 +263,7 @@ void SocketImpl::shutdownSend()
|
||||
if (rc != 0) error();
|
||||
}
|
||||
|
||||
|
||||
|
||||
void SocketImpl::shutdown()
|
||||
{
|
||||
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
|
||||
@ -317,7 +318,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
|
||||
throw TimeoutException();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int rc;
|
||||
do
|
||||
{
|
||||
@ -325,7 +326,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
|
||||
rc = ::recv(_sockfd, reinterpret_cast<char*>(buffer), length, flags);
|
||||
}
|
||||
while (blocking && rc < 0 && lastError() == POCO_EINTR);
|
||||
if (rc < 0)
|
||||
if (rc < 0)
|
||||
{
|
||||
int err = lastError();
|
||||
if ((err == POCO_EAGAIN || err == POCO_EWOULDBLOCK) && !blocking)
|
||||
@ -363,7 +364,7 @@ int SocketImpl::receiveFrom(void* buffer, int length, SocketAddress& address, in
|
||||
throw TimeoutException();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
sockaddr_storage abuffer;
|
||||
struct sockaddr* pSA = reinterpret_cast<struct sockaddr*>(&abuffer);
|
||||
poco_socklen_t saLen = sizeof(abuffer);
|
||||
@ -450,7 +451,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
|
||||
}
|
||||
while (rc < 0 && lastError() == POCO_EINTR);
|
||||
if (rc < 0) error();
|
||||
return rc > 0;
|
||||
return rc > 0;
|
||||
|
||||
#else
|
||||
|
||||
@ -493,7 +494,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
|
||||
}
|
||||
while (rc < 0 && errorCode == POCO_EINTR);
|
||||
if (rc < 0) error(errorCode);
|
||||
return rc > 0;
|
||||
return rc > 0;
|
||||
|
||||
#endif // POCO_HAVE_FD_POLL
|
||||
}
|
||||
@ -503,13 +504,13 @@ bool SocketImpl::poll(const Poco::Timespan& timeout, int mode)
|
||||
Poco::Timespan remainingTime(timeout);
|
||||
return pollImpl(remainingTime, mode);
|
||||
}
|
||||
|
||||
|
||||
void SocketImpl::setSendBufferSize(int size)
|
||||
{
|
||||
setOption(SOL_SOCKET, SO_SNDBUF, size);
|
||||
}
|
||||
|
||||
|
||||
|
||||
int SocketImpl::getSendBufferSize()
|
||||
{
|
||||
int result;
|
||||
@ -523,7 +524,7 @@ void SocketImpl::setReceiveBufferSize(int size)
|
||||
setOption(SOL_SOCKET, SO_RCVBUF, size);
|
||||
}
|
||||
|
||||
|
||||
|
||||
int SocketImpl::getReceiveBufferSize()
|
||||
{
|
||||
int result;
|
||||
@ -569,7 +570,7 @@ Poco::Timespan SocketImpl::getReceiveTimeout()
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
|
||||
SocketAddress SocketImpl::address()
|
||||
{
|
||||
if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
|
||||
@ -580,7 +581,7 @@ SocketAddress SocketImpl::address()
|
||||
int rc = ::getsockname(_sockfd, pSA, &saLen);
|
||||
if (rc == 0)
|
||||
return SocketAddress(pSA, saLen);
|
||||
else
|
||||
else
|
||||
error();
|
||||
return SocketAddress();
|
||||
}
|
||||
|
@ -8,7 +8,7 @@
|
||||
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
|
||||
// and Contributors.
|
||||
//
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
// SPDX-License-Identifier: BSL-1.0
|
||||
//
|
||||
|
||||
|
||||
@ -44,190 +44,194 @@ TCPServerConnectionFilter::~TCPServerConnectionFilter()
|
||||
|
||||
|
||||
TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::UInt16 portNumber, TCPServerParams::Ptr pParams):
|
||||
_socket(ServerSocket(portNumber)),
|
||||
_thread(threadName(_socket)),
|
||||
_stopped(true)
|
||||
{
|
||||
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
|
||||
if (pParams)
|
||||
{
|
||||
int toAdd = pParams->getMaxThreads() - pool.capacity();
|
||||
if (toAdd > 0) pool.addCapacity(toAdd);
|
||||
}
|
||||
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
|
||||
|
||||
_socket(ServerSocket(portNumber)),
|
||||
_thread(threadName(_socket)),
|
||||
_stopped(true)
|
||||
{
|
||||
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
|
||||
if (pParams)
|
||||
{
|
||||
int toAdd = pParams->getMaxThreads() - pool.capacity();
|
||||
if (toAdd > 0) pool.addCapacity(toAdd);
|
||||
}
|
||||
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
|
||||
|
||||
}
|
||||
|
||||
|
||||
TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, const ServerSocket& socket, TCPServerParams::Ptr pParams):
|
||||
_socket(socket),
|
||||
_thread(threadName(socket)),
|
||||
_stopped(true)
|
||||
_socket(socket),
|
||||
_thread(threadName(socket)),
|
||||
_stopped(true)
|
||||
{
|
||||
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
|
||||
if (pParams)
|
||||
{
|
||||
int toAdd = pParams->getMaxThreads() - pool.capacity();
|
||||
if (toAdd > 0) pool.addCapacity(toAdd);
|
||||
}
|
||||
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
|
||||
Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
|
||||
if (pParams)
|
||||
{
|
||||
int toAdd = pParams->getMaxThreads() - pool.capacity();
|
||||
if (toAdd > 0) pool.addCapacity(toAdd);
|
||||
}
|
||||
_pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
|
||||
}
|
||||
|
||||
|
||||
TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, const ServerSocket& socket, TCPServerParams::Ptr pParams):
|
||||
_socket(socket),
|
||||
_pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)),
|
||||
_thread(threadName(socket)),
|
||||
_stopped(true)
|
||||
_socket(socket),
|
||||
_pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)),
|
||||
_thread(threadName(socket)),
|
||||
_stopped(true)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
TCPServer::~TCPServer()
|
||||
{
|
||||
try
|
||||
{
|
||||
stop();
|
||||
_pDispatcher->release();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
try
|
||||
{
|
||||
stop();
|
||||
_pDispatcher->release();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
poco_unexpected();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
const TCPServerParams& TCPServer::params() const
|
||||
{
|
||||
return _pDispatcher->params();
|
||||
return _pDispatcher->params();
|
||||
}
|
||||
|
||||
|
||||
void TCPServer::start()
|
||||
{
|
||||
poco_assert (_stopped);
|
||||
poco_assert (_stopped);
|
||||
|
||||
_stopped = false;
|
||||
_thread.start(*this);
|
||||
_stopped = false;
|
||||
_thread.start(*this);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void TCPServer::stop()
|
||||
{
|
||||
if (!_stopped)
|
||||
{
|
||||
_stopped = true;
|
||||
_thread.join();
|
||||
_pDispatcher->stop();
|
||||
}
|
||||
if (!_stopped)
|
||||
{
|
||||
_stopped = true;
|
||||
_thread.join();
|
||||
_pDispatcher->stop();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void TCPServer::run()
{
	while (!_stopped)
	{
		Poco::Timespan timeout(250000);
		try
		{
			if (_socket.poll(timeout, Socket::SELECT_READ))
			{
				try
				{
					StreamSocket ss = _socket.acceptConnection();

					if (!_pConnectionFilter || _pConnectionFilter->accept(ss))
					{
						// enable nodelay per default: OSX really needs that
#if defined(POCO_OS_FAMILY_UNIX)
						if (ss.address().family() != AddressFamily::UNIX_LOCAL)
#endif
						{
							ss.setNoDelay(true);
						}
						_pDispatcher->enqueue(ss);
					}
					else
					{
						ErrorHandler::logMessage(Message::PRIO_WARNING, "Filtered out connection from " + ss.peerAddress().toString());
					}
				}
				catch (Poco::Exception& exc)
				{
					ErrorHandler::handle(exc);
				}
				catch (std::exception& exc)
				{
					ErrorHandler::handle(exc);
				}
				catch (...)
				{
					ErrorHandler::handle();
				}
			}
		}
		catch (Poco::Exception& exc)
		{
			ErrorHandler::handle(exc);
			// possibly a resource issue since poll() failed;
			// give some time to recover before trying again
			Poco::Thread::sleep(50);
		}
	}
}


int TCPServer::currentThreads() const
{
	return _pDispatcher->currentThreads();
}


int TCPServer::maxThreads() const
{
	return _pDispatcher->maxThreads();
}


int TCPServer::totalConnections() const
{
	return _pDispatcher->totalConnections();
}


int TCPServer::currentConnections() const
{
	return _pDispatcher->currentConnections();
}


int TCPServer::maxConcurrentConnections() const
{
	return _pDispatcher->maxConcurrentConnections();
}


int TCPServer::queuedConnections() const
{
	return _pDispatcher->queuedConnections();
}


int TCPServer::refusedConnections() const
{
	return _pDispatcher->refusedConnections();
}


void TCPServer::setConnectionFilter(const TCPServerConnectionFilter::Ptr& pConnectionFilter)
{
	poco_assert (_stopped);

	_pConnectionFilter = pConnectionFilter;
}


std::string TCPServer::threadName(const ServerSocket& socket)
{
	std::string name("TCPServer: ");
	name.append(socket.address().toString());
	return name;
}

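For orientation, here is a minimal usage sketch of the TCPServer machinery above (not part of the patch; the port, the EchoConnection class and the sleep-based lifetime are illustrative assumptions):

#include <Poco/Net/ServerSocket.h>
#include <Poco/Net/StreamSocket.h>
#include <Poco/Net/TCPServer.h>
#include <Poco/Net/TCPServerConnection.h>
#include <Poco/Net/TCPServerConnectionFactory.h>
#include <Poco/Thread.h>

// Each accepted socket is handed by TCPServerDispatcher to a fresh EchoConnection.
class EchoConnection: public Poco::Net::TCPServerConnection
{
public:
    using Poco::Net::TCPServerConnection::TCPServerConnection;

    void run() override
    {
        char buffer[256];
        int n = socket().receiveBytes(buffer, sizeof(buffer));
        if (n > 0)
            socket().sendBytes(buffer, n);   // echo a single message back
    }
};

int main()
{
    Poco::Net::ServerSocket listener(9977);  // illustrative port
    Poco::Net::TCPServer server(new Poco::Net::TCPServerConnectionFactoryImpl<EchoConnection>(), listener);
    server.start();                          // runs the accept loop shown in TCPServer::run() above
    Poco::Thread::sleep(60000);              // serve for a minute
    server.stop();
    return 0;
}
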
@@ -18,7 +18,6 @@


using Poco::Exception;
using Poco::ErrorHandler;


namespace Poco {
@@ -31,9 +30,7 @@ TCPServerConnection::TCPServerConnection(const StreamSocket& socket):
}


TCPServerConnection::~TCPServerConnection() = default;


void TCPServerConnection::start()

@@ -8,7 +8,7 @@
// Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//

@@ -33,44 +33,44 @@ namespace Net {

class TCPConnectionNotification: public Notification
{
public:
	TCPConnectionNotification(const StreamSocket& socket):
		_socket(socket)
	{
	}

	~TCPConnectionNotification()
	{
	}

	const StreamSocket& socket() const
	{
		return _socket;
	}

private:
	StreamSocket _socket;
};


TCPServerDispatcher::TCPServerDispatcher(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, TCPServerParams::Ptr pParams):
	_rc(1),
	_pParams(pParams),
	_currentThreads(0),
	_totalConnections(0),
	_currentConnections(0),
	_maxConcurrentConnections(0),
	_refusedConnections(0),
	_stopped(false),
	_pConnectionFactory(pFactory),
	_threadPool(threadPool)
{
	poco_check_ptr (pFactory);

	if (!_pParams)
		_pParams = new TCPServerParams;

	if (_pParams->getMaxThreads() == 0)
		_pParams->setMaxThreads(threadPool.capacity());
}


@@ -81,161 +81,184 @@ TCPServerDispatcher::~TCPServerDispatcher()

void TCPServerDispatcher::duplicate()
{
	++_rc;
}


void TCPServerDispatcher::release()
{
	if (--_rc == 0) delete this;
}


void TCPServerDispatcher::run()
{
	AutoPtr<TCPServerDispatcher> guard(this); // ensure object stays alive

	int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds();

	for (;;)
	{
		try
		{
			AutoPtr<Notification> pNf = _queue.waitDequeueNotification(idleTime);
			if (pNf && !_stopped)
			{
				TCPConnectionNotification* pCNf = dynamic_cast<TCPConnectionNotification*>(pNf.get());
				if (pCNf)
				{
					beginConnection();
					if (!_stopped)
					{
						std::unique_ptr<TCPServerConnection> pConnection(_pConnectionFactory->createConnection(pCNf->socket()));
						poco_check_ptr(pConnection.get());
						pConnection->start();
					}
					/// endConnection() should be called after destroying TCPServerConnection,
					/// otherwise currentConnections() could become zero while some connections are yet still alive.
					endConnection();
				}
			}
		}
		catch (Poco::Exception &exc) { ErrorHandler::handle(exc); }
		catch (std::exception &exc) { ErrorHandler::handle(exc); }
		catch (...) { ErrorHandler::handle(); }
		FastMutex::ScopedLock lock(_mutex);
		if (_stopped || (_currentThreads > 1 && _queue.empty()))
		{
			--_currentThreads;
			break;
		}
	}
}


namespace
{
	static const std::string threadName("TCPServerConnection");
}


void TCPServerDispatcher::enqueue(const StreamSocket& socket)
{
	FastMutex::ScopedLock lock(_mutex);

	ErrorHandler::logMessage(Message::PRIO_TEST, "Queue size: " + std::to_string(_queue.size()) +
		", current threads: " + std::to_string(_currentThreads) +
		", threads in pool: " + std::to_string(_threadPool.allocated()) +
		", current connections: " + std::to_string(_currentConnections));

	if (_queue.size() < _pParams->getMaxQueued())
	{
		/// NOTE: the condition below is wrong.
		/// Since the thread pool is shared between multiple servers/TCPServerDispatchers,
		/// _currentThreads < _pParams->getMaxThreads() will be true when the pool is actually saturated.
		/// As a result, queue is useless and connections never wait in queue.
		/// Instead, we (mistakenly) think that we can create a thread for this connection, but we fail to create it
		/// and the connection get rejected.
		/// We could check _currentThreads < _threadPool.allocated() to make it work,
		/// but it's not clear if we want to make it work
		/// because it may be better to reject connection immediately if we don't have resources to handle it.
		if (!_queue.hasIdleThreads() && _currentThreads < _pParams->getMaxThreads())
		{
			try
			{
				this->duplicate();
				_threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName);
				++_currentThreads;
			}
			catch (Poco::Exception& exc)
			{
				ErrorHandler::logMessage(Message::PRIO_WARNING, "Got an exception while starting thread for connection from " +
					socket.peerAddress().toString());
				ErrorHandler::handle(exc);
				this->release();
				++_refusedConnections;
				return;
			}
		}
		else if (!_queue.hasIdleThreads())
		{
			ErrorHandler::logMessage(Message::PRIO_TRACE, "Don't have idle threads, adding connection from " +
				socket.peerAddress().toString() + " to the queue, size: " + std::to_string(_queue.size()));
		}
		_queue.enqueueNotification(new TCPConnectionNotification(socket));
	}
	else
	{
		ErrorHandler::logMessage(Message::PRIO_WARNING, "Refusing connection from " + socket.peerAddress().toString() +
			", reached max queue size " + std::to_string(_pParams->getMaxQueued()));
		++_refusedConnections;
	}
}

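To make the NOTE inside enqueue() above concrete, here is a small self-contained sketch (capacities and class names are illustrative assumptions, not part of the patch) showing how a shared Poco::ThreadPool rejects work once it is saturated, which is exactly the situation enqueue() runs into when several dispatchers share one pool:

#include <iostream>
#include <Poco/Exception.h>
#include <Poco/Runnable.h>
#include <Poco/Thread.h>
#include <Poco/ThreadPool.h>

// A runnable that simply holds a pool thread for a while.
class Sleeper: public Poco::Runnable
{
public:
    void run() override { Poco::Thread::sleep(1000); }
};

int main()
{
    Poco::ThreadPool pool(2, 2);   // min and max capacity of 2 threads
    Sleeper a, b, c;
    pool.start(a);
    pool.start(b);                 // the shared pool is now saturated
    try
    {
        pool.start(c);             // comparable to startWithPriority() in enqueue()
    }
    catch (const Poco::NoThreadAvailableException & e)
    {
        std::cout << "connection would be refused: " << e.displayText() << '\n';
    }
    pool.joinAll();
    return 0;
}
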
void TCPServerDispatcher::stop()
{
	_stopped = true;
	_queue.clear();
	_queue.wakeUpAll();
}


int TCPServerDispatcher::currentThreads() const
{
	return _currentThreads;
}


int TCPServerDispatcher::maxThreads() const
{
	FastMutex::ScopedLock lock(_mutex);

	return _threadPool.capacity();
}


int TCPServerDispatcher::totalConnections() const
{
	return _totalConnections;
}


int TCPServerDispatcher::currentConnections() const
{
	return _currentConnections;
}


int TCPServerDispatcher::maxConcurrentConnections() const
{
	return _maxConcurrentConnections;
}


int TCPServerDispatcher::queuedConnections() const
{
	return _queue.size();
}


int TCPServerDispatcher::refusedConnections() const
{
	return _refusedConnections;
}


void TCPServerDispatcher::beginConnection()
{
	FastMutex::ScopedLock lock(_mutex);

	++_totalConnections;
	++_currentConnections;
	if (_currentConnections > _maxConcurrentConnections)
		_maxConcurrentConnections.store(_currentConnections);
}


void TCPServerDispatcher::endConnection()
{
	--_currentConnections;
}


@@ -248,6 +248,9 @@ namespace Net
		SSL_CTX * sslContext() const;
			/// Returns the underlying OpenSSL SSL Context object.

		SSL_CTX * takeSslContext();
			/// Takes ownership of the underlying OpenSSL SSL Context object.

		Usage usage() const;
			/// Returns whether the context is for use by a client or by a server
			/// and whether TLSv1 is required.
@@ -401,6 +404,13 @@ namespace Net
		return _pSSLContext;
	}

	inline SSL_CTX * Context::takeSslContext()
	{
		auto * result = _pSSLContext;
		_pSSLContext = nullptr;
		return result;
	}


	inline bool Context::extendedCertificateVerificationEnabled() const
	{

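A short sketch of the intended use of takeSslContext() (the caller function is a hypothetical example, not part of the patch): ownership of the SSL_CTX moves to the caller, and the Context destructor shown in the next hunk then sees a null pointer and frees nothing.

#include <Poco/Net/Context.h>
#include <openssl/ssl.h>

// After this call the caller is responsible for SSL_CTX_free() on the returned handle.
SSL_CTX * adoptSslContext(const Poco::Net::Context::Ptr & context)
{
    return context->takeSslContext();
}
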
@@ -106,6 +106,11 @@ Context::Context(

Context::~Context()
{
	if (_pSSLContext == nullptr)
	{
		return;
	}

	try
	{
		SSL_CTX_free(_pSSLContext);

@@ -311,6 +311,14 @@ int SecureSocketImpl::sendBytes(const void* buffer, int length, int flags)
	while (mustRetry(rc, remaining_time));
	if (rc <= 0)
	{
		// At this stage we still can have last not yet received SSL message containing SSL error
		// so make a read to force SSL to process possible SSL error
		if (SSL_get_error(_pSSL, rc) == SSL_ERROR_SYSCALL && SocketImpl::lastError() == POCO_ECONNRESET)
		{
			char c = 0;
			SSL_read(_pSSL, &c, 1);
		}

		rc = handleError(rc);
		if (rc == 0) throw SSLConnectionUnexpectedlyClosedException();
	}

@@ -18,4 +18,4 @@ target_compile_options (_poco_util
		-Wno-zero-as-null-pointer-constant
)
target_include_directories (_poco_util SYSTEM PUBLIC "include")
target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML Poco::Net)

@@ -241,6 +241,20 @@ namespace Util
		/// If the value contains references to other properties (${<property>}), these
		/// are expanded.

	std::string getHost(const std::string & key) const;
		/// Returns the string value of the host property with the given name.
		/// Throws a NotFoundException if the key does not exist.
		/// Throws a SyntaxException if the property is not a valid host (IP address or domain).
		/// If the value contains references to other properties (${<property>}), these
		/// are expanded.

	std::string getHost(const std::string & key, const std::string & defaultValue) const;
		/// If a property with the given key exists, returns the host property's string value,
		/// otherwise returns the given default value.
		/// Throws a SyntaxException if the property is not a valid host (IP address or domain).
		/// If the value contains references to other properties (${<property>}), these
		/// are expanded.

	virtual void setString(const std::string & key, const std::string & value);
		/// Sets the property with the given key to the given value.
		/// An already existing value for the key is overwritten.
@@ -339,12 +353,35 @@ namespace Util
	static bool parseBool(const std::string & value);
	void setRawWithEvent(const std::string & key, std::string value);

	static void checkHostValidity(const std::string & value);
		/// Throws a SyntaxException if the value is not a valid host (IP address or domain).

	virtual ~AbstractConfiguration();

private:
	std::string internalExpand(const std::string & value) const;
	std::string uncheckedExpand(const std::string & value) const;

	static bool isValidIPv4Address(const std::string & value);
		/// IPv4 address considered valid if it is "0.0.0.0" or one of those,
		/// defined by inet_aton() or inet_addr()

	static bool isValidIPv6Address(const std::string & value);
		/// IPv6 address considered valid if it is "::" or one of those,
		/// defined by inet_pton() with AF_INET6 flag
		/// (in this case it may have scope id and may be surrounded by '[', ']')

	static bool isValidDomainName(const std::string & value);
		/// <domain> ::= <subdomain> [ "." ]
		/// <subdomain> ::= <label> | <subdomain> "." <label>
		/// <label> ::= <letter> [ [ <ldh-str> ] <let-dig> ]
		/// <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
		/// <let-dig-hyp> ::= <let-dig> | "-"
		/// <let-dig> ::= <letter> | <digit>
		/// <letter> ::= any one of the 52 alphabetic characters A through Z in
		/// upper case and a through z in lower case
		/// <digit> ::= any one of the ten digits 0 through 9

	AbstractConfiguration(const AbstractConfiguration &);
	AbstractConfiguration & operator=(const AbstractConfiguration &);

@@ -18,6 +18,7 @@
#include "Poco/NumberParser.h"
#include "Poco/NumberFormatter.h"
#include "Poco/String.h"
#include "Poco/Net/IPAddressImpl.h"


using Poco::Mutex;
@@ -263,6 +264,41 @@ bool AbstractConfiguration::getBool(const std::string& key, bool defaultValue) const
}


std::string AbstractConfiguration::getHost(const std::string& key) const
{
	Mutex::ScopedLock lock(_mutex);

	std::string value;
	if (getRaw(key, value))
	{
		std::string expandedValue = internalExpand(value);
		checkHostValidity(expandedValue);
		return expandedValue;
	}
	else
		throw NotFoundException(key);
}


std::string AbstractConfiguration::getHost(const std::string& key, const std::string& defaultValue) const
{
	Mutex::ScopedLock lock(_mutex);

	std::string value;
	if (getRaw(key, value))
	{
		std::string expandedValue = internalExpand(value);
		checkHostValidity(expandedValue);
		return expandedValue;
	}
	else
	{
		checkHostValidity(defaultValue);
		return defaultValue;
	}
}


void AbstractConfiguration::setString(const std::string& key, const std::string& value)
{
	setRawWithEvent(key, value);
@@ -529,4 +565,68 @@ void AbstractConfiguration::setRawWithEvent(const std::string& key, std::string value)
}


void AbstractConfiguration::checkHostValidity(const std::string& value)
{
	if (!isValidIPv4Address(value) && !isValidIPv6Address(value) && !isValidDomainName(value))
	{
		throw SyntaxException("Property is not a valid host name", value);
	}
}


bool AbstractConfiguration::isValidIPv4Address(const std::string& value)
{
	using Poco::Net::Impl::IPv4AddressImpl;
	IPv4AddressImpl empty4 = IPv4AddressImpl();

	IPv4AddressImpl ipAddress = IPv4AddressImpl::parse(value);
	return ipAddress != empty4 || value == "0.0.0.0";
}


bool AbstractConfiguration::isValidIPv6Address(const std::string& value)
{
#if defined(POCO_HAVE_IPv6)
	using Poco::Net::Impl::IPv6AddressImpl;
	IPv6AddressImpl empty6 = IPv6AddressImpl();

	IPv6AddressImpl ipAddress = IPv6AddressImpl::parse(value);
	return ipAddress != empty6 || value == "::";
#else
	return false;
#endif
}


bool AbstractConfiguration::isValidDomainName(const std::string& value)
{
	if (value.empty() || value == "." || value.length() > 253)
		return false;
	int labelLength = 0;
	char oldChar = 0;

	for (char ch : value)
	{
		if (ch == '.')
		{
			if (labelLength == 0 || labelLength > 63 || oldChar == '-')
				return false;
			labelLength = 0;
		}
		else if (isalnum(ch) || ch == '-')
		{
			if (labelLength == 0 && (ch == '-' || isdigit(ch)))
				return false;
			++labelLength;
		}
		else
		{
			return false;
		}
		oldChar = ch;
	}
	return oldChar == '.' || (labelLength > 0 && labelLength <= 63 && oldChar != '-');
}


} } // namespace Poco::Util

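For illustration, a self-contained probe of the new host validation (MapConfiguration, the key name, and the sample values are assumptions for the example, not part of the patch); it exercises checkHostValidity() indirectly through getHost():

#include <iostream>
#include <string>
#include <Poco/AutoPtr.h>
#include <Poco/Exception.h>
#include <Poco/Util/MapConfiguration.h>

// Returns true if getHost() accepts the candidate, false if it throws SyntaxException.
static bool isAcceptedHost(const std::string & candidate)
{
    Poco::AutoPtr<Poco::Util::MapConfiguration> config(new Poco::Util::MapConfiguration);
    config->setString("host", candidate);
    try
    {
        config->getHost("host");
        return true;
    }
    catch (const Poco::SyntaxException &)
    {
        return false;
    }
}

int main()
{
    std::cout << isAcceptedHost("127.0.0.1") << '\n';       // 1: valid IPv4 address
    std::cout << isAcceptedHost("::") << '\n';               // 1 when IPv6 support is enabled
    std::cout << isAcceptedHost("clickhouse.com.") << '\n';  // 1: domain, optional trailing dot
    std::cout << isAcceptedHost("-bad.example") << '\n';     // 0: label may not start with '-'
    return 0;
}
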
1
ci/README.md
Normal file
@@ -0,0 +1 @@
Note: This directory is under active development for CI improvements and is not currently in use within the scope of the existing CI pipeline.
109
ci/docker/fasttest/Dockerfile
Normal file
@ -0,0 +1,109 @@
|
||||
# docker build -t clickhouse/fasttest .
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# ARG for quick switch to a given ubuntu mirror
|
||||
ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install \
|
||||
apt-transport-https \
|
||||
apt-utils \
|
||||
ca-certificates \
|
||||
curl \
|
||||
gnupg \
|
||||
lsb-release \
|
||||
wget \
|
||||
git \
|
||||
--yes --no-install-recommends --verbose-versions \
|
||||
&& export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
|
||||
&& wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
|
||||
&& echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
|
||||
&& apt-key add /tmp/llvm-snapshot.gpg.key \
|
||||
&& export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
|
||||
&& echo "deb https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
|
||||
/etc/apt/sources.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install --yes --no-install-recommends --verbose-versions llvm-${LLVM_VERSION} \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
|
||||
|
||||
# moreutils - provides ts for FT
|
||||
# expect, bzip2 - required by FT
|
||||
# bsdmainutils - provides hexdump for FT
|
||||
# nasm - nasm compiler for one of the submodules, required for the normal build
|
||||
# yasm - assembler for libhdfs3, required for the normal build
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install \
|
||||
clang-${LLVM_VERSION} \
|
||||
cmake \
|
||||
libclang-${LLVM_VERSION}-dev \
|
||||
libclang-rt-${LLVM_VERSION}-dev \
|
||||
lld-${LLVM_VERSION} \
|
||||
llvm-${LLVM_VERSION}-dev \
|
||||
lsof \
|
||||
ninja-build \
|
||||
python3 \
|
||||
python3-pip \
|
||||
zstd \
|
||||
moreutils \
|
||||
expect \
|
||||
bsdmainutils \
|
||||
pv \
|
||||
jq \
|
||||
bzip2 \
|
||||
nasm \
|
||||
yasm \
|
||||
--yes --no-install-recommends \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
|
||||
|
||||
COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
|
||||
# Give suid to gdb to grant it attach permissions
|
||||
RUN chmod u+s /opt/gdb/bin/gdb
|
||||
ENV PATH="/opt/gdb/bin:${PATH}"
|
||||
|
||||
# This symlink is required by gcc to find the lld linker
|
||||
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
|
||||
# FIXME: workaround for "The imported target "merge-fdata" references the file" error
|
||||
# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
|
||||
RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake
|
||||
|
||||
# LLVM changes paths for compiler-rt libraries. For some reason clang-18.1.8 cannot catch up libraries from default install path.
|
||||
# It's very dirty workaround, better to build compiler and LLVM ourself and use it. Details: https://github.com/llvm/llvm-project/issues/95792
|
||||
RUN test ! -d /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu || ln -s /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu /usr/lib/llvm-18/lib/clang/18/lib/x86_64-unknown-linux-gnu
|
||||
|
||||
ARG TARGETARCH
|
||||
ARG SCCACHE_VERSION=v0.7.7
|
||||
ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1
|
||||
# sccache requires a value for the region. So by default we use The Default Region
|
||||
ENV SCCACHE_REGION=us-east-1
|
||||
RUN arch=${TARGETARCH} \
|
||||
&& case $arch in \
|
||||
amd64) rarch=x86_64 ;; \
|
||||
arm64) rarch=aarch64 ;; \
|
||||
esac \
|
||||
&& curl -Ls "https://github.com/mozilla/sccache/releases/download/$SCCACHE_VERSION/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl.tar.gz" | \
|
||||
tar xz -C /tmp \
|
||||
&& mv "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl/sccache" /usr/bin \
|
||||
&& rm "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl" -r
|
||||
|
||||
COPY requirements.txt /
|
||||
RUN pip3 install --no-cache-dir -r /requirements.txt
|
||||
|
||||
# chmod 777 to make the container user independent
|
||||
RUN mkdir -p /var/lib/clickhouse \
|
||||
&& chmod 777 /var/lib/clickhouse
|
||||
|
||||
ENV TZ=Europe/Amsterdam
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
RUN groupadd --system --gid 1000 clickhouse \
|
||||
&& useradd --system --gid 1000 --uid 1000 -m clickhouse \
|
||||
&& mkdir -p /.cache/sccache && chmod 777 /.cache/sccache
|
||||
|
||||
ENV PYTHONPATH="/wd"
|
||||
ENV PYTHONUNBUFFERED=1
|
6
ci/docker/fasttest/requirements.txt
Normal file
@@ -0,0 +1,6 @@
Jinja2==3.1.3
numpy==1.26.4
requests==2.32.3
pandas==1.5.3
scipy==1.12.0
#https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl
17
ci/docker/style-test/Dockerfile
Normal file
@@ -0,0 +1,17 @@
# docker build -t clickhouse/style-test .
FROM ubuntu:22.04

RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
    aspell \
    libxml2-utils \
    python3-pip \
    locales \
    git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
ENV LC_ALL=en_US.UTF-8

COPY requirements.txt /
RUN pip3 install --no-cache-dir -r requirements.txt
5
ci/docker/style-test/requirements.txt
Normal file
@@ -0,0 +1,5 @@
requests==2.32.3
yamllint==1.26.3
codespell==2.2.1
#use praktika from CH repo
#https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl
102
ci/jobs/build_clickhouse.py
Normal file
@ -0,0 +1,102 @@
|
||||
import argparse
|
||||
|
||||
from praktika.result import Result
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import MetaClasses, Shell, Utils
|
||||
|
||||
|
||||
class JobStages(metaclass=MetaClasses.WithIter):
|
||||
CHECKOUT_SUBMODULES = "checkout"
|
||||
CMAKE = "cmake"
|
||||
BUILD = "build"
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser(description="ClickHouse Build Job")
|
||||
parser.add_argument("BUILD_TYPE", help="Type: <amd|arm_debug|release_sanitizer>")
|
||||
parser.add_argument("--param", help="Optional custom job start stage", default=None)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
args = parse_args()
|
||||
|
||||
stop_watch = Utils.Stopwatch()
|
||||
|
||||
stages = list(JobStages)
|
||||
stage = args.param or JobStages.CHECKOUT_SUBMODULES
|
||||
if stage:
|
||||
assert stage in JobStages, f"--param must be one of [{list(JobStages)}]"
|
||||
print(f"Job will start from stage [{stage}]")
|
||||
while stage in stages:
|
||||
stages.pop(0)
|
||||
stages.insert(0, stage)
|
||||
|
||||
cmake_build_type = "Release"
|
||||
sanitizer = ""
|
||||
|
||||
if "debug" in args.BUILD_TYPE.lower():
|
||||
print("Build type set: debug")
|
||||
cmake_build_type = "Debug"
|
||||
|
||||
if "asan" in args.BUILD_TYPE.lower():
|
||||
print("Sanitizer set: address")
|
||||
sanitizer = "address"
|
||||
|
||||
# if Environment.is_local_run():
|
||||
# build_cache_type = "disabled"
|
||||
# else:
|
||||
build_cache_type = "sccache"
|
||||
|
||||
current_directory = Utils.cwd()
|
||||
build_dir = f"{Settings.TEMP_DIR}/build"
|
||||
|
||||
res = True
|
||||
results = []
|
||||
|
||||
if res and JobStages.CHECKOUT_SUBMODULES in stages:
|
||||
Shell.check(f"rm -rf {build_dir} && mkdir -p {build_dir}")
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Checkout Submodules",
|
||||
command=f"git submodule sync --recursive && git submodule init && git submodule update --depth 1 --recursive --jobs {min([Utils.cpu_count(), 20])}",
|
||||
)
|
||||
)
|
||||
res = results[-1].is_ok()
|
||||
|
||||
if res and JobStages.CMAKE in stages:
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Cmake configuration",
|
||||
command=f"cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE={cmake_build_type} \
|
||||
-DSANITIZE={sanitizer} -DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \
|
||||
-DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \
|
||||
-DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \
|
||||
-DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 -DCOMPILER_CACHE={build_cache_type} -DENABLE_TESTS=1 \
|
||||
-DENABLE_BUILD_PROFILING=1 {current_directory}",
|
||||
workdir=build_dir,
|
||||
with_log=True,
|
||||
)
|
||||
)
|
||||
res = results[-1].is_ok()
|
||||
|
||||
if res and JobStages.BUILD in stages:
|
||||
Shell.check("sccache --show-stats")
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Build ClickHouse",
|
||||
command="ninja clickhouse-bundle clickhouse-odbc-bridge clickhouse-library-bridge",
|
||||
workdir=build_dir,
|
||||
with_log=True,
|
||||
)
|
||||
)
|
||||
Shell.check("sccache --show-stats")
|
||||
Shell.check(f"ls -l {build_dir}/programs/")
|
||||
res = results[-1].is_ok()
|
||||
|
||||
Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
382
ci/jobs/check_style.py
Normal file
@ -0,0 +1,382 @@
|
||||
import math
|
||||
import multiprocessing
|
||||
import os
|
||||
import re
|
||||
from concurrent.futures import ProcessPoolExecutor
|
||||
from pathlib import Path
|
||||
|
||||
from praktika.result import Result
|
||||
from praktika.utils import Shell, Utils
|
||||
|
||||
NPROC = multiprocessing.cpu_count()
|
||||
|
||||
|
||||
def chunk_list(data, n):
|
||||
"""Split the data list into n nearly equal-sized chunks."""
|
||||
chunk_size = math.ceil(len(data) / n)
|
||||
for i in range(0, len(data), chunk_size):
|
||||
yield data[i : i + chunk_size]
|
||||
|
||||
|
||||
def run_check_concurrent(check_name, check_function, files, nproc=NPROC):
|
||||
stop_watch = Utils.Stopwatch()
|
||||
|
||||
if not files:
|
||||
print(f"File list is empty [{files}]")
|
||||
raise ValueError(f"File list is empty [{files}]")
|
||||
|
||||
file_chunks = list(chunk_list(files, nproc))
|
||||
results = []
|
||||
|
||||
# Run check_function concurrently on each chunk
|
||||
with ProcessPoolExecutor(max_workers=NPROC) as executor:
|
||||
futures = [executor.submit(check_function, chunk) for chunk in file_chunks]
|
||||
# Wait for results and process them (optional)
|
||||
for future in futures:
|
||||
try:
|
||||
res = future.result()
|
||||
if res and res not in results:
|
||||
results.append(res)
|
||||
except Exception as e:
|
||||
results.append(f"Exception in {check_name}: {e}")
|
||||
|
||||
result = Result(
|
||||
name=check_name,
|
||||
status=Result.Status.SUCCESS if not results else Result.Status.FAILED,
|
||||
start_time=stop_watch.start_time,
|
||||
duration=stop_watch.duration,
|
||||
info=f"errors: {results}" if results else "",
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
def check_duplicate_includes(file_path):
|
||||
includes = []
|
||||
with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
|
||||
for line in f:
|
||||
if re.match(r"^#include ", line):
|
||||
includes.append(line.strip())
|
||||
|
||||
include_counts = {line: includes.count(line) for line in includes}
|
||||
duplicates = {line: count for line, count in include_counts.items() if count > 1}
|
||||
|
||||
if duplicates:
|
||||
return f"{file_path}: {duplicates}"
|
||||
return ""
|
||||
|
||||
|
||||
def check_whitespaces(file_paths):
|
||||
for file in file_paths:
|
||||
exit_code, out, err = Shell.get_res_stdout_stderr(
|
||||
f'./ci/jobs/scripts/check_style/double_whitespaces.pl "{file}"',
|
||||
verbose=False,
|
||||
)
|
||||
if out or err:
|
||||
return out + " err: " + err
|
||||
return ""
|
||||
|
||||
|
||||
def check_yamllint(file_paths):
|
||||
file_paths = " ".join([f"'{file}'" for file in file_paths])
|
||||
exit_code, out, err = Shell.get_res_stdout_stderr(
|
||||
f"yamllint --config-file=./.yamllint {file_paths}", verbose=False
|
||||
)
|
||||
return out or err
|
||||
|
||||
|
||||
def check_xmllint(file_paths):
|
||||
if not isinstance(file_paths, list):
|
||||
file_paths = [file_paths]
|
||||
file_paths = " ".join([f"'{file}'" for file in file_paths])
|
||||
exit_code, out, err = Shell.get_res_stdout_stderr(
|
||||
f"xmllint --noout --nonet {file_paths}", verbose=False
|
||||
)
|
||||
return out or err
|
||||
|
||||
|
||||
def check_functional_test_cases(files):
|
||||
"""
|
||||
Queries with event_date should have yesterday() not today()
|
||||
NOTE: it is not that accurate, but at least something.
|
||||
"""
|
||||
|
||||
patterns = [
|
||||
re.compile(
|
||||
r"(?i)where.*?\bevent_date\s*(=|>=)\s*today\(\)(?!\s*-\s*1)",
|
||||
re.IGNORECASE | re.DOTALL,
|
||||
)
|
||||
]
|
||||
|
||||
errors = []
|
||||
for test_case in files:
|
||||
try:
|
||||
with open(test_case, "r", encoding="utf-8", errors="replace") as f:
|
||||
file_content = " ".join(
|
||||
f.read().splitlines()
|
||||
) # Combine lines into a single string
|
||||
|
||||
# Check if any pattern matches in the concatenated string
|
||||
if any(pattern.search(file_content) for pattern in patterns):
|
||||
errors.append(
|
||||
f"event_date should be filtered using >=yesterday() in {test_case} (to avoid flakiness)"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
errors.append(f"Error checking {test_case}: {e}")
|
||||
|
||||
for test_case in files:
|
||||
if "fail" in test_case:
|
||||
errors.append(f"test case {test_case} includes 'fail' in its name")
|
||||
|
||||
return " ".join(errors)
|
||||
|
||||
|
||||
def check_gaps_in_tests_numbers(file_paths, gap_threshold=100):
|
||||
test_numbers = set()
|
||||
|
||||
pattern = re.compile(r"(\d+)")
|
||||
|
||||
for file in file_paths:
|
||||
file_name = os.path.basename(file)
|
||||
match = pattern.search(file_name)
|
||||
if match:
|
||||
test_numbers.add(int(match.group(1)))
|
||||
|
||||
sorted_numbers = sorted(test_numbers)
|
||||
large_gaps = []
|
||||
for i in range(1, len(sorted_numbers)):
|
||||
prev_num = sorted_numbers[i - 1]
|
||||
next_num = sorted_numbers[i]
|
||||
diff = next_num - prev_num
|
||||
if diff >= gap_threshold:
|
||||
large_gaps.append(f"Gap ({prev_num}, {next_num}) > {gap_threshold}")
|
||||
|
||||
return large_gaps
|
||||
|
||||
|
||||
def check_broken_links(path, exclude_paths):
|
||||
broken_symlinks = []
|
||||
|
||||
for path in Path(path).rglob("*"):
|
||||
if any(exclude_path in str(path) for exclude_path in exclude_paths):
|
||||
continue
|
||||
if path.is_symlink():
|
||||
if not path.exists():
|
||||
broken_symlinks.append(str(path))
|
||||
|
||||
if broken_symlinks:
|
||||
for symlink in broken_symlinks:
|
||||
print(symlink)
|
||||
return f"Broken symlinks found: {broken_symlinks}"
|
||||
else:
|
||||
return ""
|
||||
|
||||
|
||||
def check_cpp_code():
|
||||
res, out, err = Shell.get_res_stdout_stderr(
|
||||
"./ci/jobs/scripts/check_style/check_cpp.sh"
|
||||
)
|
||||
if err:
|
||||
out += err
|
||||
return out
|
||||
|
||||
|
||||
def check_repo_submodules():
|
||||
res, out, err = Shell.get_res_stdout_stderr(
|
||||
"./ci/jobs/scripts/check_style/check_submodules.sh"
|
||||
)
|
||||
if err:
|
||||
out += err
|
||||
return out
|
||||
|
||||
|
||||
def check_other():
|
||||
res, out, err = Shell.get_res_stdout_stderr(
|
||||
"./ci/jobs/scripts/check_style/checks_to_refactor.sh"
|
||||
)
|
||||
if err:
|
||||
out += err
|
||||
return out
|
||||
|
||||
|
||||
def check_codespell():
|
||||
res, out, err = Shell.get_res_stdout_stderr(
|
||||
"./ci/jobs/scripts/check_style/check_typos.sh"
|
||||
)
|
||||
if err:
|
||||
out += err
|
||||
return out
|
||||
|
||||
|
||||
def check_aspell():
|
||||
res, out, err = Shell.get_res_stdout_stderr(
|
||||
"./ci/jobs/scripts/check_style/check_aspell.sh"
|
||||
)
|
||||
if err:
|
||||
out += err
|
||||
return out
|
||||
|
||||
|
||||
def check_mypy():
|
||||
res, out, err = Shell.get_res_stdout_stderr(
|
||||
"./ci/jobs/scripts/check_style/check-mypy"
|
||||
)
|
||||
if err:
|
||||
out += err
|
||||
return out
|
||||
|
||||
|
||||
def check_pylint():
|
||||
res, out, err = Shell.get_res_stdout_stderr(
|
||||
"./ci/jobs/scripts/check_style/check-pylint"
|
||||
)
|
||||
if err:
|
||||
out += err
|
||||
return out
|
||||
|
||||
|
||||
def check_file_names(files):
|
||||
files_set = set()
|
||||
for file in files:
|
||||
file_ = file.lower()
|
||||
if file_ in files_set:
|
||||
return f"Non-uniq file name in lower case: {file}"
|
||||
files_set.add(file_)
|
||||
return ""
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
results = []
|
||||
stop_watch = Utils.Stopwatch()
|
||||
|
||||
all_files = Utils.traverse_paths(
|
||||
include_paths=["."],
|
||||
exclude_paths=[
|
||||
"./.git",
|
||||
"./contrib",
|
||||
"./build",
|
||||
],
|
||||
not_exists_ok=True, # ./build may exist if runs locally
|
||||
)
|
||||
|
||||
cpp_files = Utils.traverse_paths(
|
||||
include_paths=["./src", "./base", "./programs", "./utils"],
|
||||
exclude_paths=[
|
||||
"./base/glibc-compatibility",
|
||||
"./contrib/consistent-hashing",
|
||||
"./base/widechar_width",
|
||||
],
|
||||
file_suffixes=[".h", ".cpp"],
|
||||
)
|
||||
|
||||
yaml_workflow_files = Utils.traverse_paths(
|
||||
include_paths=["./.github"],
|
||||
exclude_paths=[],
|
||||
file_suffixes=[".yaml", ".yml"],
|
||||
)
|
||||
|
||||
xml_files = Utils.traverse_paths(
|
||||
include_paths=["."],
|
||||
exclude_paths=["./.git", "./contrib/"],
|
||||
file_suffixes=[".xml"],
|
||||
)
|
||||
|
||||
functional_test_files = Utils.traverse_paths(
|
||||
include_paths=["./tests/queries"],
|
||||
exclude_paths=[],
|
||||
file_suffixes=[".sql", ".sh", ".py", ".j2"],
|
||||
)
|
||||
|
||||
results.append(
|
||||
Result(
|
||||
name="Read Files",
|
||||
status=Result.Status.SUCCESS,
|
||||
start_time=stop_watch.start_time,
|
||||
duration=stop_watch.duration,
|
||||
)
|
||||
)
|
||||
|
||||
results.append(
|
||||
run_check_concurrent(
|
||||
check_name="Whitespace Check",
|
||||
check_function=check_whitespaces,
|
||||
files=cpp_files,
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
run_check_concurrent(
|
||||
check_name="YamlLint Check",
|
||||
check_function=check_yamllint,
|
||||
files=yaml_workflow_files,
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
run_check_concurrent(
|
||||
check_name="XmlLint Check",
|
||||
check_function=check_xmllint,
|
||||
files=xml_files,
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
run_check_concurrent(
|
||||
check_name="Functional Tests scripts smoke check",
|
||||
check_function=check_functional_test_cases,
|
||||
files=functional_test_files,
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Check Tests Numbers",
|
||||
command=check_gaps_in_tests_numbers,
|
||||
command_args=[functional_test_files],
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Check Broken Symlinks",
|
||||
command=check_broken_links,
|
||||
command_kwargs={
|
||||
"path": "./",
|
||||
"exclude_paths": ["contrib/", "metadata/", "programs/server/data"],
|
||||
},
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Check CPP code",
|
||||
command=check_cpp_code,
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Check Submodules",
|
||||
command=check_repo_submodules,
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Check File Names",
|
||||
command=check_file_names,
|
||||
command_args=[all_files],
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Check Many Different Things",
|
||||
command=check_other,
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Check Codespell",
|
||||
command=check_codespell,
|
||||
)
|
||||
)
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Check Aspell",
|
||||
command=check_aspell,
|
||||
)
|
||||
)
|
||||
|
||||
Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()
|
337
ci/jobs/fast_test.py
Normal file
@ -0,0 +1,337 @@
|
||||
import argparse
|
||||
import threading
|
||||
from pathlib import Path
|
||||
|
||||
from praktika.result import Result
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import MetaClasses, Shell, Utils
|
||||
|
||||
from ci.jobs.scripts.functional_tests_results import FTResultsProcessor
|
||||
|
||||
|
||||
class ClickHouseProc:
|
||||
def __init__(self):
|
||||
self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server"
|
||||
self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid"
|
||||
self.config_file = f"{self.ch_config_dir}/config.xml"
|
||||
self.user_files_path = f"{self.ch_config_dir}/user_files"
|
||||
self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"
|
||||
self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination"
|
||||
self.proc = None
|
||||
self.pid = 0
|
||||
nproc = int(Utils.cpu_count() / 2)
|
||||
self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \
|
||||
| tee -a \"{self.test_output_file}\""
|
||||
# TODO: store info in case of failure
|
||||
self.info = ""
|
||||
self.info_file = ""
|
||||
|
||||
Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir)
|
||||
Utils.set_env("CLICKHOUSE_CONFIG", self.config_file)
|
||||
Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path)
|
||||
Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas")
|
||||
|
||||
def start(self):
|
||||
print("Starting ClickHouse server")
|
||||
Shell.check(f"rm {self.pid_file}")
|
||||
|
||||
def run_clickhouse():
|
||||
self.proc = Shell.run_async(
|
||||
self.command, verbose=True, suppress_output=True
|
||||
)
|
||||
|
||||
thread = threading.Thread(target=run_clickhouse)
|
||||
thread.daemon = True # Allow program to exit even if thread is still running
|
||||
thread.start()
|
||||
|
||||
# self.proc = Shell.run_async(self.command, verbose=True)
|
||||
|
||||
started = False
|
||||
try:
|
||||
for _ in range(5):
|
||||
pid = Shell.get_output(f"cat {self.pid_file}").strip()
|
||||
if not pid:
|
||||
Utils.sleep(1)
|
||||
continue
|
||||
started = True
|
||||
print(f"Got pid from fs [{pid}]")
|
||||
_ = int(pid)
|
||||
break
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if not started:
|
||||
stdout = self.proc.stdout.read().strip() if self.proc.stdout else ""
|
||||
stderr = self.proc.stderr.read().strip() if self.proc.stderr else ""
|
||||
Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr)
|
||||
return False
|
||||
|
||||
print(f"ClickHouse server started successfully, pid [{pid}]")
|
||||
return True
|
||||
|
||||
def wait_ready(self):
|
||||
res, out, err = 0, "", ""
|
||||
attempts = 30
|
||||
delay = 2
|
||||
for attempt in range(attempts):
|
||||
res, out, err = Shell.get_res_stdout_stderr(
|
||||
'clickhouse-client --query "select 1"', verbose=True
|
||||
)
|
||||
if out.strip() == "1":
|
||||
print("Server ready")
|
||||
break
|
||||
else:
|
||||
print(f"Server not ready, wait")
|
||||
Utils.sleep(delay)
|
||||
else:
|
||||
Utils.print_formatted_error(
|
||||
f"Server not ready after [{attempts*delay}s]", out, err
|
||||
)
|
||||
return False
|
||||
return True
|
||||
|
||||
def run_fast_test(self):
|
||||
if Path(self.test_output_file).exists():
|
||||
Path(self.test_output_file).unlink()
|
||||
exit_code = Shell.run(self.fast_test_command)
|
||||
return exit_code == 0
|
||||
|
||||
def terminate(self):
|
||||
print("Terminate ClickHouse process")
|
||||
timeout = 10
|
||||
if self.proc:
|
||||
Utils.terminate_process_group(self.proc.pid)
|
||||
|
||||
self.proc.terminate()
|
||||
try:
|
||||
self.proc.wait(timeout=10)
|
||||
print(f"Process {self.proc.pid} terminated gracefully.")
|
||||
except Exception:
|
||||
print(
|
||||
f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..."
|
||||
)
|
||||
Utils.terminate_process_group(self.proc.pid, force=True)
|
||||
self.proc.wait() # Wait for the process to be fully killed
|
||||
print(f"Process {self.proc} was killed.")
|
||||
|
||||
|
||||
def clone_submodules():
|
||||
submodules_to_update = [
|
||||
"contrib/sysroot",
|
||||
"contrib/magic_enum",
|
||||
"contrib/abseil-cpp",
|
||||
"contrib/boost",
|
||||
"contrib/zlib-ng",
|
||||
"contrib/libxml2",
|
||||
"contrib/libunwind",
|
||||
"contrib/fmtlib",
|
||||
"contrib/aklomp-base64",
|
||||
"contrib/cctz",
|
||||
"contrib/libcpuid",
|
||||
"contrib/libdivide",
|
||||
"contrib/double-conversion",
|
||||
"contrib/llvm-project",
|
||||
"contrib/lz4",
|
||||
"contrib/zstd",
|
||||
"contrib/fastops",
|
||||
"contrib/rapidjson",
|
||||
"contrib/re2",
|
||||
"contrib/sparsehash-c11",
|
||||
"contrib/croaring",
|
||||
"contrib/miniselect",
|
||||
"contrib/xz",
|
||||
"contrib/dragonbox",
|
||||
"contrib/fast_float",
|
||||
"contrib/NuRaft",
|
||||
"contrib/jemalloc",
|
||||
"contrib/replxx",
|
||||
"contrib/wyhash",
|
||||
"contrib/c-ares",
|
||||
"contrib/morton-nd",
|
||||
"contrib/xxHash",
|
||||
"contrib/expected",
|
||||
"contrib/simdjson",
|
||||
"contrib/liburing",
|
||||
"contrib/libfiu",
|
||||
"contrib/incbin",
|
||||
"contrib/yaml-cpp",
|
||||
]
|
||||
|
||||
res = Shell.check("git submodule sync", verbose=True, strict=True)
|
||||
res = res and Shell.check("git submodule init", verbose=True, strict=True)
|
||||
res = res and Shell.check(
|
||||
command=f"xargs --max-procs={min([Utils.cpu_count(), 20])} --null --no-run-if-empty --max-args=1 git submodule update --depth 1 --single-branch",
|
||||
stdin_str="\0".join(submodules_to_update) + "\0",
|
||||
timeout=120,
|
||||
retries=3,
|
||||
verbose=True,
|
||||
)
|
||||
res = res and Shell.check("git submodule foreach git reset --hard", verbose=True)
|
||||
res = res and Shell.check("git submodule foreach git checkout @ -f", verbose=True)
|
||||
res = res and Shell.check("git submodule foreach git clean -xfd", verbose=True)
|
||||
return res
|
||||
|
||||
|
||||
def update_path_ch_config(config_file_path=""):
|
||||
print("Updating path in clickhouse config")
|
||||
config_file_path = (
|
||||
config_file_path or f"{Settings.TEMP_DIR}/etc/clickhouse-server/config.xml"
|
||||
)
|
||||
ssl_config_file_path = (
|
||||
f"{Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml"
|
||||
)
|
||||
try:
|
||||
with open(config_file_path, "r", encoding="utf-8") as file:
|
||||
content = file.read()
|
||||
|
||||
with open(ssl_config_file_path, "r", encoding="utf-8") as file:
|
||||
ssl_config_content = file.read()
|
||||
content = content.replace(">/var/", f">{Settings.TEMP_DIR}/var/")
|
||||
content = content.replace(">/etc/", f">{Settings.TEMP_DIR}/etc/")
|
||||
ssl_config_content = ssl_config_content.replace(
|
||||
">/etc/", f">{Settings.TEMP_DIR}/etc/"
|
||||
)
|
||||
with open(config_file_path, "w", encoding="utf-8") as file:
|
||||
file.write(content)
|
||||
with open(ssl_config_file_path, "w", encoding="utf-8") as file:
|
||||
file.write(ssl_config_content)
|
||||
except Exception as e:
|
||||
print(f"ERROR: failed to update config, exception: {e}")
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class JobStages(metaclass=MetaClasses.WithIter):
|
||||
CHECKOUT_SUBMODULES = "checkout"
|
||||
CMAKE = "cmake"
|
||||
BUILD = "build"
|
||||
CONFIG = "config"
|
||||
TEST = "test"
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser(description="ClickHouse Fast Test Job")
|
||||
parser.add_argument("--param", help="Optional custom job start stage", default=None)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
stop_watch = Utils.Stopwatch()
|
||||
|
||||
stages = list(JobStages)
|
||||
stage = args.param or JobStages.CHECKOUT_SUBMODULES
|
||||
if stage:
|
||||
assert stage in JobStages, f"--param must be one of [{list(JobStages)}]"
|
||||
print(f"Job will start from stage [{stage}]")
|
||||
while stage in stages:
|
||||
stages.pop(0)
|
||||
stages.insert(0, stage)
|
||||
|
||||
current_directory = Utils.cwd()
|
||||
build_dir = f"{Settings.TEMP_DIR}/build"
|
||||
|
||||
Utils.add_to_PATH(f"{build_dir}/programs:{current_directory}/tests")
|
||||
|
||||
res = True
|
||||
results = []
|
||||
|
||||
if res and JobStages.CHECKOUT_SUBMODULES in stages:
|
||||
Shell.check(f"rm -rf {build_dir} && mkdir -p {build_dir}")
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Checkout Submodules for Minimal Build",
|
||||
command=clone_submodules,
|
||||
)
|
||||
)
|
||||
res = results[-1].is_ok()
|
||||
|
||||
if res and JobStages.CMAKE in stages:
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Cmake configuration",
|
||||
command=f"cmake {current_directory} -DCMAKE_CXX_COMPILER=clang++-18 -DCMAKE_C_COMPILER=clang-18 \
|
||||
-DCMAKE_TOOLCHAIN_FILE={current_directory}/cmake/linux/toolchain-x86_64-musl.cmake -DENABLE_LIBRARIES=0 \
|
||||
-DENABLE_TESTS=0 -DENABLE_UTILS=0 -DENABLE_THINLTO=0 -DENABLE_NURAFT=1 -DENABLE_SIMDJSON=1 \
|
||||
-DENABLE_JEMALLOC=1 -DENABLE_LIBURING=1 -DENABLE_YAML_CPP=1 -DCOMPILER_CACHE=sccache",
|
||||
workdir=build_dir,
|
||||
with_log=True,
|
||||
)
|
||||
)
|
||||
res = results[-1].is_ok()
|
||||
|
||||
if res and JobStages.BUILD in stages:
|
||||
Shell.check("sccache --show-stats")
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Build ClickHouse",
|
||||
command="ninja clickhouse-bundle clickhouse-stripped",
|
||||
workdir=build_dir,
|
||||
with_log=True,
|
||||
)
|
||||
)
|
||||
Shell.check("sccache --show-stats")
|
||||
res = results[-1].is_ok()
|
||||
|
||||
if res and JobStages.BUILD in stages:
|
||||
commands = [
|
||||
f"mkdir -p {Settings.OUTPUT_DIR}/binaries",
|
||||
f"cp ./programs/clickhouse {Settings.OUTPUT_DIR}/binaries/clickhouse",
|
||||
f"zstd --threads=0 --force programs/clickhouse-stripped -o {Settings.OUTPUT_DIR}/binaries/clickhouse-stripped.zst",
|
||||
"sccache --show-stats",
|
||||
"clickhouse-client --version",
|
||||
"clickhouse-test --help",
|
||||
]
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Check and Compress binary",
|
||||
command=commands,
|
||||
workdir=build_dir,
|
||||
with_log=True,
|
||||
)
|
||||
)
|
||||
res = results[-1].is_ok()
|
||||
|
||||
if res and JobStages.CONFIG in stages:
|
||||
commands = [
|
||||
f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server",
|
||||
f"cp {current_directory}/programs/server/config.xml {current_directory}/programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
|
||||
f"{current_directory}/tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client",
|
||||
# f"cp -a {current_directory}/programs/server/config.d/log_to_console.xml {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/",
|
||||
f"rm -f {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/secure_ports.xml",
|
||||
update_path_ch_config,
|
||||
]
|
||||
results.append(
|
||||
Result.create_from_command_execution(
|
||||
name="Install ClickHouse Config",
|
||||
command=commands,
|
||||
with_log=True,
|
||||
)
|
||||
)
|
||||
res = results[-1].is_ok()
|
||||
|
||||
CH = ClickHouseProc()
|
||||
if res and JobStages.TEST in stages:
|
||||
stop_watch_ = Utils.Stopwatch()
|
||||
step_name = "Start ClickHouse Server"
|
||||
print(step_name)
|
||||
res = CH.start()
|
||||
res = res and CH.wait_ready()
|
||||
results.append(
|
||||
Result.create_from(name=step_name, status=res, stopwatch=stop_watch_)
|
||||
)
|
||||
|
||||
if res and JobStages.TEST in stages:
|
||||
step_name = "Tests"
|
||||
print(step_name)
|
||||
res = res and CH.run_fast_test()
|
||||
if res:
|
||||
results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run())
|
||||
|
||||
CH.terminate()
|
||||
|
||||
Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
3134
ci/jobs/scripts/check_style/aspell-ignore/en/aspell-dict.txt
Normal file
File diff suppressed because it is too large
59
ci/jobs/scripts/check_style/check_aspell.sh
Executable file
@ -0,0 +1,59 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# force-enable double star globbing
|
||||
shopt -s globstar
|
||||
|
||||
# Perform spell checking on the docs
|
||||
|
||||
if [[ ${1:-} == "--help" ]] || [[ ${1:-} == "-h" ]]; then
|
||||
echo "Usage $0 [--help|-h] [-i [filename]]"
|
||||
echo " --help|-h: print this help"
|
||||
echo " -i: interactive mode. If filename is specified, check only this file, otherwise check all files"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
ROOT_PATH="."
|
||||
|
||||
CHECK_LANG=en
|
||||
|
||||
ASPELL_IGNORE_PATH="${ROOT_PATH}/utils/check-style/aspell-ignore/${CHECK_LANG}"
|
||||
|
||||
if [[ ${1:-} == "-i" ]]; then
|
||||
if [[ ! -z ${2:-} ]]; then
|
||||
FILES_TO_CHECK=${ROOT_PATH}/docs/${CHECK_LANG}/${2}
|
||||
else
|
||||
FILES_TO_CHECK=${ROOT_PATH}/docs/${CHECK_LANG}/**/*.md
|
||||
fi
|
||||
for fname in ${FILES_TO_CHECK}; do
|
||||
echo "Checking $fname"
|
||||
aspell --personal=aspell-dict.txt --add-sgml-skip=code --encoding=utf-8 --mode=markdown -W 3 --lang=${CHECK_LANG} --home-dir=${ASPELL_IGNORE_PATH} -c "$fname"
|
||||
done
|
||||
exit
|
||||
fi
|
||||
|
||||
STATUS=0
|
||||
for fname in ${ROOT_PATH}/docs/${CHECK_LANG}/**/*.md; do
|
||||
errors=$(cat "$fname" \
|
||||
| aspell list \
|
||||
-W 3 \
|
||||
--personal=aspell-dict.txt \
|
||||
--add-sgml-skip=code \
|
||||
--encoding=utf-8 \
|
||||
--mode=markdown \
|
||||
--lang=${CHECK_LANG} \
|
||||
--home-dir=${ASPELL_IGNORE_PATH} \
|
||||
| sort | uniq)
|
||||
if [ ! -z "$errors" ]; then
|
||||
STATUS=1
|
||||
echo "====== $fname ======"
|
||||
echo "$errors"
|
||||
fi
|
||||
done
|
||||
|
||||
if (( STATUS != 0 )); then
|
||||
echo "====== Errors found ======"
|
||||
echo "To exclude some words add them to the dictionary file \"${ASPELL_IGNORE_PATH}/aspell-dict.txt\""
|
||||
echo "You can also run ${0} -i to see the errors interactively and fix them or add to the dictionary file"
|
||||
fi
|
||||
|
||||
exit ${STATUS}
|
321
ci/jobs/scripts/check_style/check_cpp.sh
Executable file
@ -0,0 +1,321 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# For code formatting we have clang-format.
|
||||
#
|
||||
# But it's not sane to apply clang-format to the whole code base,
|
||||
# because it sometimes makes properly formatted files worse.
|
||||
#
|
||||
# It's only reasonable to blindly apply clang-format in cases
|
||||
# when the code is likely to be out of style.
|
||||
#
|
||||
# For this purpose we have a script that will use very primitive heuristics
|
||||
# (simple regexps) to check if the code is likely to have basic style violations,
|
||||
# and then to run the formatter only for the specified files.
|
||||
|
||||
LC_ALL="en_US.UTF-8"
|
||||
ROOT_PATH="."
|
||||
EXCLUDE='build/|integration/|widechar_width/|glibc-compatibility/|poco/|memcpy/|consistent-hashing|benchmark|tests/.*.cpp|utils/keeper-bench/example.yaml'
|
||||
EXCLUDE_DOCS='Settings\.cpp|FormatFactorySettingsDeclaration\.h'
|
||||
|
||||
# From [1]:
|
||||
# But since array_to_string_internal() in array.c still loops over array
|
||||
# elements and concatenates them into a string, it's probably not more
|
||||
# efficient than the looping solutions proposed, but it's more readable.
|
||||
#
|
||||
# [1]: https://stackoverflow.com/a/15394738/328260
|
||||
function in_array()
|
||||
{
|
||||
local IFS="|"
|
||||
local value=$1 && shift
|
||||
|
||||
[[ "${IFS}${*}${IFS}" =~ "${IFS}${value}${IFS}" ]]
|
||||
}
|
||||
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
|
||||
grep -vP $EXCLUDE |
|
||||
grep -vP $EXCLUDE_DOCS |
|
||||
xargs grep $@ -P '((class|struct|namespace|enum|if|for|while|else|throw|switch).*|\)(\s*const)?(\s*override)?\s*)\{$|\s$|^ {1,3}[^\* ]\S|\t|^\s*(if|else if|if constexpr|else if constexpr|for|while|catch|switch)\(|\( [^\s\\]|\S \)' |
|
||||
# a curly brace not in a new line, but not for the case of C++11 init or agg. initialization | trailing whitespace | number of ws not a multiple of 4, but not in the case of comment continuation | missing whitespace after for/if/while... before opening brace | whitespaces inside braces
|
||||
grep -v -P '(//|:\s+\*|\$\(\()| \)"'
|
||||
# single-line comment | continuation of a multiline comment | a typical piece of embedded shell code | something like ending of raw string literal
|
||||
|
||||
# Tabs
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep $@ -F $'\t' && echo '^ tabs are not allowed'
|
||||
|
||||
# // namespace comments are unneeded
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep $@ -P '}\s*//+\s*namespace\s*'
|
||||
|
||||
# Broken symlinks
|
||||
find -L $ROOT_PATH -type l 2>/dev/null | grep -v contrib && echo "^ Broken symlinks found"
|
||||
|
||||
# Duplicated or incorrect setting declarations
|
||||
bash $ROOT_PATH/utils/check-style/check-settings-style
|
||||
|
||||
# Unused/Undefined/Duplicates ErrorCodes/ProfileEvents/CurrentMetrics
|
||||
declare -A EXTERN_TYPES
|
||||
EXTERN_TYPES[ErrorCodes]=int
|
||||
EXTERN_TYPES[ProfileEvents]=Event
|
||||
EXTERN_TYPES[CurrentMetrics]=Metric
|
||||
|
||||
EXTERN_TYPES_EXCLUDES=(
|
||||
ProfileEvents::global_counters
|
||||
ProfileEvents::Event
|
||||
ProfileEvents::Count
|
||||
ProfileEvents::Counters
|
||||
ProfileEvents::end
|
||||
ProfileEvents::increment
|
||||
ProfileEvents::incrementForLogMessage
|
||||
ProfileEvents::getName
|
||||
ProfileEvents::Timer
|
||||
ProfileEvents::Type
|
||||
ProfileEvents::TypeEnum
|
||||
ProfileEvents::ValueType
|
||||
ProfileEvents::dumpToMapColumn
|
||||
ProfileEvents::getProfileEvents
|
||||
ProfileEvents::ThreadIdToCountersSnapshot
|
||||
ProfileEvents::LOCAL_NAME
|
||||
ProfileEvents::keeper_profile_events
|
||||
ProfileEvents::CountersIncrement
|
||||
ProfileEvents::size
|
||||
|
||||
CurrentMetrics::add
|
||||
CurrentMetrics::sub
|
||||
CurrentMetrics::get
|
||||
CurrentMetrics::set
|
||||
CurrentMetrics::end
|
||||
CurrentMetrics::Increment
|
||||
CurrentMetrics::Metric
|
||||
CurrentMetrics::values
|
||||
CurrentMetrics::Value
|
||||
CurrentMetrics::keeper_metrics
|
||||
CurrentMetrics::size
|
||||
|
||||
ErrorCodes::ErrorCode
|
||||
ErrorCodes::getName
|
||||
ErrorCodes::increment
|
||||
ErrorCodes::end
|
||||
ErrorCodes::values
|
||||
ErrorCodes::values[i]
|
||||
ErrorCodes::getErrorCodeByName
|
||||
ErrorCodes::Value
|
||||
)
|
||||
for extern_type in ${!EXTERN_TYPES[@]}; do
|
||||
type_of_extern=${EXTERN_TYPES[$extern_type]}
|
||||
allowed_chars='[_A-Za-z]+'
|
||||
|
||||
# Unused
|
||||
# NOTE: to fix automatically, replace echo with:
|
||||
# sed -i "/extern const $type_of_extern $val/d" $file
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | {
|
||||
# NOTE: the check is pretty dumb and distinguishes only by the type_of_extern,
|
||||
# and this matches with zkutil::CreateMode
|
||||
grep -v -e 'src/Common/ZooKeeper/Types.h' -e 'src/Coordination/KeeperConstants.cpp'
|
||||
} | {
|
||||
grep -vP $EXCLUDE | xargs grep -l -P "extern const $type_of_extern $allowed_chars"
|
||||
} | while read file; do
|
||||
grep -P "extern const $type_of_extern $allowed_chars;" $file | sed -r -e "s/^.*?extern const $type_of_extern ($allowed_chars);.*?$/\1/" | while read val; do
|
||||
if ! grep -q "$extern_type::$val" $file; then
|
||||
# Excludes for SOFTWARE_EVENT/HARDWARE_EVENT/CACHE_EVENT in ThreadProfileEvents.cpp
|
||||
if [[ ! $extern_type::$val =~ ProfileEvents::Perf.* ]]; then
|
||||
echo "$extern_type::$val is defined but not used in file $file"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
# Undefined
|
||||
# NOTE: to fix automatically, replace echo with:
|
||||
# ( grep -q -F 'namespace $extern_type' $file && \
|
||||
# sed -i -r "0,/(\s*)extern const $type_of_extern [$allowed_chars]+/s//\1extern const $type_of_extern $val;\n&/" $file || \
|
||||
# awk '{ print; if (ns == 1) { ns = 2 }; if (ns == 2) { ns = 0; print "namespace $extern_type\n{\n extern const $type_of_extern '$val';\n}" } }; /namespace DB/ { ns = 1; };' < $file > ${file}.tmp && mv ${file}.tmp $file )
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | {
|
||||
grep -vP $EXCLUDE | xargs grep -l -P "$extern_type::$allowed_chars"
|
||||
} | while read file; do
|
||||
grep -P "$extern_type::$allowed_chars" $file | grep -P -v '^\s*//' | sed -r -e "s/^.*?$extern_type::($allowed_chars).*?$/\1/" | while read val; do
|
||||
if ! grep -q "extern const $type_of_extern $val" $file; then
|
||||
if ! in_array "$extern_type::$val" "${EXTERN_TYPES_EXCLUDES[@]}"; then
|
||||
echo "$extern_type::$val is used in file $file but not defined"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
# Duplicates
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | {
|
||||
grep -vP $EXCLUDE | xargs grep -l -P "$extern_type::$allowed_chars"
|
||||
} | while read file; do
|
||||
grep -P "extern const $type_of_extern $allowed_chars;" $file | sort | uniq -c | grep -v -P ' +1 ' && echo "Duplicate $extern_type in file $file"
|
||||
done
|
||||
done
|
||||
|
||||
# Three or more consecutive empty lines
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
while read file; do awk '/^$/ { ++i; if (i > 2) { print "More than two consecutive empty lines in file '$file'" } } /./ { i = 0 }' $file; done
|
||||
|
||||
# Check that every header file has #pragma once in first line
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' |
|
||||
grep -vP $EXCLUDE |
|
||||
while read file; do [[ $(head -n1 $file) != '#pragma once' ]] && echo "File $file must have '#pragma once' in first line"; done
|
||||
|
||||
# Too many exclamation marks
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -F '!!!' | grep -P '.' && echo "Too many exclamation marks (looks dirty, unconfident)."
|
||||
|
||||
# Exclamation mark in a message
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -F '!",' | grep -P '.' && echo "No need for an exclamation mark (looks dirty, unconfident)."
|
||||
|
||||
# Trailing whitespaces
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -n -P ' $' | grep -n -P '.' && echo "^ Trailing whitespaces."
|
||||
|
||||
# Forbid stringstream because it's easy to use them incorrectly and hard to debug possible issues
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -P 'std::[io]?stringstream' | grep -v "STYLE_CHECK_ALLOW_STD_STRING_STREAM" && echo "Use WriteBufferFromOwnString or ReadBufferFromString instead of std::stringstream"
|
||||
|
||||
# Forbid std::cerr/std::cout in src (fine in programs/utils)
|
||||
std_cerr_cout_excludes=(
|
||||
/examples/
|
||||
/tests/
|
||||
_fuzzer
|
||||
# OK
|
||||
src/Common/ProgressIndication.cpp
|
||||
src/Common/ProgressTable.cpp
|
||||
# only under #ifdef DBMS_HASH_MAP_DEBUG_RESIZES, that is used only in tests
|
||||
src/Common/HashTable/HashTable.h
|
||||
# SensitiveDataMasker::printStats()
|
||||
src/Common/SensitiveDataMasker.cpp
|
||||
# StreamStatistics::print()
|
||||
src/Compression/LZ4_decompress_faster.cpp
|
||||
# ContextSharedPart with subsequent std::terminate()
|
||||
src/Interpreters/Context.cpp
|
||||
# IProcessor::dump()
|
||||
src/Processors/IProcessor.cpp
|
||||
src/Client/ClientApplicationBase.cpp
|
||||
src/Client/ClientBase.cpp
|
||||
src/Client/LineReader.cpp
|
||||
src/Client/QueryFuzzer.cpp
|
||||
src/Client/Suggest.cpp
|
||||
src/Client/ClientBase.h
|
||||
src/Client/LineReader.h
|
||||
src/Client/ReplxxLineReader.h
|
||||
src/Bridge/IBridge.cpp
|
||||
src/Daemon/BaseDaemon.cpp
|
||||
src/Loggers/Loggers.cpp
|
||||
src/Common/GWPAsan.cpp
|
||||
src/Common/ProgressIndication.h
|
||||
)
|
||||
sources_with_std_cerr_cout=( $(
|
||||
find $ROOT_PATH/{src,base} -name '*.h' -or -name '*.cpp' | \
|
||||
grep -vP $EXCLUDE | \
|
||||
grep -F -v $(printf -- "-e %s " "${std_cerr_cout_excludes[@]}") | \
|
||||
xargs grep -F --with-filename -e std::cerr -e std::cout | cut -d: -f1 | sort -u
|
||||
) )
|
||||
# Exclude comments
|
||||
for src in "${sources_with_std_cerr_cout[@]}"; do
|
||||
# suppress stderr, since it may contain a warning for #pragma once in headers
|
||||
if gcc -fpreprocessed -dD -E "$src" 2>/dev/null | grep -F -q -e std::cerr -e std::cout; then
|
||||
echo "$src: uses std::cerr/std::cout"
|
||||
fi
|
||||
done
|
||||
|
||||
expect_tests=( $(find $ROOT_PATH/tests/queries -name '*.expect') )
|
||||
for test_case in "${expect_tests[@]}"; do
|
||||
pattern="^exp_internal -f \$CLICKHOUSE_TMP/\$basename.debuglog 0$"
|
||||
grep -q "$pattern" "$test_case" || echo "Missing '$pattern' in '$test_case'"
|
||||
|
||||
if grep -q "^spawn.*CLICKHOUSE_CLIENT_BINARY$" "$test_case"; then
|
||||
pattern="^spawn.*CLICKHOUSE_CLIENT_BINARY.*--history_file$"
|
||||
grep -q "$pattern" "$test_case" || echo "Missing '$pattern' in '$test_case'"
|
||||
fi
|
||||
|
||||
# Otherwise expect_after/expect_before will not bail without stdin attached
|
||||
# (and actually this is a hack anyway, correct way is to use $any_spawn_id)
|
||||
pattern="-i \$any_spawn_id timeout"
|
||||
grep -q -- "$pattern" "$test_case" || echo "Missing '$pattern' in '$test_case'"
|
||||
pattern="-i \$any_spawn_id eof"
|
||||
grep -q -- "$pattern" "$test_case" || echo "Missing '$pattern' in '$test_case'"
|
||||
done
|
||||
|
||||
# Forbid non-unique error codes
|
||||
if [[ "$(grep -Po "M\([0-9]*," $ROOT_PATH/src/Common/ErrorCodes.cpp | wc -l)" != "$(grep -Po "M\([0-9]*," $ROOT_PATH/src/Common/ErrorCodes.cpp | sort | uniq | wc -l)" ]]
|
||||
then
|
||||
echo "ErrorCodes.cpp contains non-unique error codes"
|
||||
fi
|
||||
|
||||
# Check that there is no system-wide libraries/headers in use.
|
||||
#
|
||||
# NOTE: it is better to override find_path/find_library in cmake, but right now
|
||||
# it is not possible, see [1] for the reference.
|
||||
#
|
||||
# [1]: git grep --recurse-submodules -e find_library -e find_path contrib
|
||||
if git grep -e find_path -e find_library -- :**CMakeLists.txt; then
|
||||
echo "There is find_path/find_library usage. ClickHouse should use everything bundled. Consider adding one more contrib module."
|
||||
fi
|
||||
|
||||
# Forbid std::filesystem::is_symlink and std::filesystem::read_symlink, because it's easy to use them incorrectly
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -P '::(is|read)_symlink' | grep -v "STYLE_CHECK_ALLOW_STD_FS_SYMLINK" && echo "Use DB::FS::isSymlink and DB::FS::readSymlink instead"
|
||||
|
||||
# Forbid __builtin_unreachable(), because it's hard to debug when it becomes reachable
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -P '__builtin_unreachable' && echo "Use UNREACHABLE() from defines.h instead"
|
||||
|
||||
# Forbid mt19937() and random_device() which are outdated and slow
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -P '(std::mt19937|std::mersenne_twister_engine|std::random_device)' && echo "Use pcg64_fast (from pcg_random.h) and randomSeed (from Common/randomSeed.h) instead"
|
||||
|
||||
# Require checking return value of close(),
|
||||
# since it can hide fd misuse and break other places.
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -e ' close(.*fd' -e ' ::close(' | grep -v = && echo "Return value of close() should be checked"
|
||||
|
||||
# A small typo can lead to debug code in release builds, see https://github.com/ClickHouse/ClickHouse/pull/47647
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -l -F '#ifdef NDEBUG' | xargs -I@FILE awk '/#ifdef NDEBUG/ { inside = 1; dirty = 1 } /#endif/ { if (inside && dirty) { print "File @FILE has suspicious #ifdef NDEBUG, possibly confused with #ifndef NDEBUG" }; inside = 0 } /#else/ { dirty = 0 }' @FILE
|
||||
|
||||
# If a user is doing dynamic or typeid cast with a pointer, and immediately dereferencing it, it is unsafe.
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep --line-number -P '(dynamic|typeid)_cast<[^>]+\*>\([^\(\)]+\)->' | grep -P '.' && echo "It's suspicious when you are doing a dynamic_cast or typeid_cast with a pointer and immediately dereferencing it. Use references instead of pointers or check a pointer to nullptr."
|
||||
|
||||
# Check for bad punctuation: whitespace before comma.
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -P --line-number '\w ,' | grep -v 'bad punctuation is ok here' && echo "^ There is bad punctuation: whitespace before comma. You should write it like this: 'Hello, world!'"
|
||||
|
||||
# Check usage of std::regex which is too bloated and slow.
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -P --line-number 'std::regex' | grep -P '.' && echo "^ Please use re2 instead of std::regex"
|
||||
|
||||
# Cyrillic characters hiding inside Latin.
|
||||
find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | grep -v StorageSystemContributors.generated.cpp | xargs grep -P --line-number '[a-zA-Z][а-яА-ЯёЁ]|[а-яА-ЯёЁ][a-zA-Z]' && echo "^ Cyrillic characters found in unexpected place."
|
||||
|
||||
# Orphaned header files.
|
||||
join -v1 <(find $ROOT_PATH/{src,programs,utils} -name '*.h' -printf '%f\n' | sort | uniq) <(find $ROOT_PATH/{src,programs,utils} -name '*.cpp' -or -name '*.c' -or -name '*.h' -or -name '*.S' | xargs grep --no-filename -o -P '[\w-]+\.h' | sort | uniq) |
|
||||
grep . && echo '^ Found orphan header files.'
|
||||
|
||||
# Don't allow dynamic compiler check with CMake, because we are using hermetic, reproducible, cross-compiled, static (TLDR, good) builds.
|
||||
ls -1d $ROOT_PATH/contrib/*-cmake | xargs -I@ find @ -name 'CMakeLists.txt' -or -name '*.cmake' | xargs grep --with-filename -i -P 'check_c_compiler_flag|check_cxx_compiler_flag|check_c_source_compiles|check_cxx_source_compiles|check_include_file|check_symbol_exists|cmake_push_check_state|cmake_pop_check_state|find_package|CMAKE_REQUIRED_FLAGS|CheckIncludeFile|CheckCCompilerFlag|CheckCXXCompilerFlag|CheckCSourceCompiles|CheckCXXSourceCompiles|CheckCSymbolExists|CheckCXXSymbolExists' | grep -v Rust && echo "^ It's not allowed to have dynamic compiler checks with CMake."
|
||||
|
||||
# Wrong spelling of abbreviations, e.g. SQL is right, Sql is wrong. XMLHttpRequest is very wrong.
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -P 'Sql|Html|Xml|Cpu|Tcp|Udp|Http|Db|Json|Yaml' | grep -v -P 'RabbitMQ|Azure|Aws|aws|Avro|IO/S3' &&
|
||||
echo "Abbreviations such as SQL, XML, HTTP, should be in all caps. For example, SQL is right, Sql is wrong. XMLHttpRequest is very wrong."
|
||||
|
||||
find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
|
||||
grep -vP $EXCLUDE |
|
||||
xargs grep -F -i 'ErrorCodes::LOGICAL_ERROR, "Logical error:' &&
|
||||
echo "If an exception has LOGICAL_ERROR code, there is no need to include the text 'Logical error' in the exception message, because then the phrase 'Logical error' will be printed twice."
|
||||
|
||||
PATTERN="allow_";
|
||||
DIFF=$(comm -3 <(grep -o "\b$PATTERN\w*\b" $ROOT_PATH/src/Core/Settings.cpp | sort -u) <(grep -o -h "\b$PATTERN\w*\b" $ROOT_PATH/src/Databases/enableAllExperimentalSettings.cpp $ROOT_PATH/utils/check-style/experimental_settings_ignore.txt | sort -u));
|
||||
[ -n "$DIFF" ] && echo "$DIFF" && echo "^^ Detected 'allow_*' settings that might need to be included in src/Databases/enableAllExperimentalSettings.cpp" && echo "Alternatively, consider adding an exception to utils/check-style/experimental_settings_ignore.txt"
|
37
ci/jobs/scripts/check_style/check_submodules.sh
Executable file
@ -0,0 +1,37 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# The script checks if all submodules defined in $GIT_ROOT/.gitmodules exist in $GIT_ROOT/contrib
|
||||
|
||||
set -e
|
||||
|
||||
GIT_ROOT="."
|
||||
|
||||
cd "$GIT_ROOT"
|
||||
|
||||
# Remove keys for submodule.*.path parameters, the values are separated by \0
|
||||
# and check if the directory exists
|
||||
git config --file .gitmodules --null --get-regexp path | sed -z 's|.*\n||' | \
|
||||
xargs -P100 -0 --no-run-if-empty -I{} bash -c 'if ! test -d '"'{}'"'; then echo Directory for submodule {} is not found; exit 1; fi' 2>&1
|
||||
|
||||
|
||||
# And check that the submodule is fine
|
||||
git config --file .gitmodules --null --get-regexp path | sed -z 's|.*\n||' | \
|
||||
xargs -P100 -0 --no-run-if-empty -I{} git submodule status -q '{}' 2>&1
|
||||
|
||||
|
||||
# All submodules should be from https://github.com/
|
||||
git config --file "$ROOT_PATH/.gitmodules" --get-regexp 'submodule\..+\.url' | \
|
||||
while read -r line; do
|
||||
name=${line#submodule.}; name=${name%.url*}
|
||||
url=${line#* }
|
||||
[[ "$url" != 'https://github.com/'* ]] && echo "All submodules should be from https://github.com/, submodule '$name' has '$url'"
|
||||
done
|
||||
|
||||
# All submodules should be of this form: [submodule "contrib/libxyz"] (for consistency; the submodule name does not matter too much)
|
||||
# - restrict the check to top-level .gitmodules file
|
||||
git config --file "$ROOT_PATH/.gitmodules" --get-regexp 'submodule\..+\.path' | \
|
||||
while read -r line; do
|
||||
name=${line#submodule.}; name=${name%.path*}
|
||||
path=${line#* }
|
||||
[ "$name" != "$path" ] && echo "Submodule name '$name' is not equal to it's path '$path'"
|
||||
done
|
15
ci/jobs/scripts/check_style/check_typos.sh
Executable file
@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Check for typos in code.
|
||||
|
||||
ROOT_PATH="."
|
||||
|
||||
#FIXME: check all (or almost all) repo
|
||||
codespell \
|
||||
--skip "*generated*,*gperf*,*.bin,*.mrk*,*.idx,checksums.txt,*.dat,*.pyc,*.kate-swp,*obfuscateQueries.cpp,d3-*.js,*.min.js,*.sum,${ROOT_PATH}/utils/check-style/aspell-ignore" \
|
||||
--ignore-words "${ROOT_PATH}/utils/check-style/codespell-ignore-words.list" \
|
||||
--exclude-file "${ROOT_PATH}/utils/check-style/codespell-ignore-lines.list" \
|
||||
--quiet-level 2 \
|
||||
"$ROOT_PATH"/{src,base,programs,utils} \
|
||||
$@ | grep -P '.' \
|
||||
&& echo -e "\nFound some typos in code.\nSee the files utils/check-style/codespell* if you want to add an exception."
|
98
ci/jobs/scripts/check_style/checks_to_refactor.sh
Executable file
@ -0,0 +1,98 @@
|
||||
#!/bin/bash
|
||||
|
||||
ROOT_PATH="."
|
||||
|
||||
# Queries to system.query_log/system.query_thread_log should have current_database = currentDatabase() condition
|
||||
# NOTE: it is not that accurate, but at least something.
|
||||
tests_with_query_log=( $(
|
||||
find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' |
|
||||
xargs grep --with-filename -e system.query_log -e system.query_thread_log | cut -d: -f1 | sort -u
|
||||
) )
|
||||
for test_case in "${tests_with_query_log[@]}"; do
|
||||
grep -qE current_database.*currentDatabase "$test_case" || {
|
||||
grep -qE 'current_database.*\$CLICKHOUSE_DATABASE' "$test_case"
|
||||
} || echo "Queries to system.query_log/system.query_thread_log does not have current_database = currentDatabase() condition in $test_case"
|
||||
done
|
||||
|
||||
grep -iE 'SYSTEM STOP MERGES;?$' -R $ROOT_PATH/tests/queries && echo "Merges cannot be disabled globally in fast/stateful/stateless tests, because it will break concurrently running queries"
|
||||
|
||||
|
||||
# Queries to:
|
||||
tables_with_database_column=(
|
||||
system.tables
|
||||
system.parts
|
||||
system.detached_parts
|
||||
system.parts_columns
|
||||
system.columns
|
||||
system.projection_parts
|
||||
system.mutations
|
||||
)
|
||||
# should have database = currentDatabase() condition
|
||||
#
|
||||
# NOTE: it is not that accurate, but at least something.
|
||||
tests_with_database_column=( $(
|
||||
find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' |
|
||||
xargs grep --with-filename $(printf -- "-e %s " "${tables_with_database_column[@]}") |
|
||||
grep -v -e ':--' -e ':#' |
|
||||
cut -d: -f1 | sort -u
|
||||
) )
|
||||
for test_case in "${tests_with_database_column[@]}"; do
|
||||
grep -qE database.*currentDatabase "$test_case" || {
|
||||
grep -qE 'database.*\$CLICKHOUSE_DATABASE' "$test_case"
|
||||
} || {
|
||||
# explicit database
|
||||
grep -qE "database[ ]*=[ ]*'" "$test_case"
|
||||
} || {
|
||||
echo "Queries to ${tables_with_database_column[*]} does not have database = currentDatabase()/\$CLICKHOUSE_DATABASE condition in $test_case"
|
||||
}
|
||||
done
|
||||
|
||||
# Queries with ReplicatedMergeTree
|
||||
# NOTE: it is not that accurate, but at least something.
|
||||
tests_with_replicated_merge_tree=( $(
|
||||
find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' |
|
||||
xargs grep --with-filename -e "Replicated.*MergeTree[ ]*(.*" | cut -d: -f1 | sort -u
|
||||
) )
|
||||
for test_case in "${tests_with_replicated_merge_tree[@]}"; do
|
||||
case "$test_case" in
|
||||
*.gen.*)
|
||||
;;
|
||||
*.sh)
|
||||
test_case_zk_prefix="\(\$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX\|{database}\)"
|
||||
grep -q -e "Replicated.*MergeTree[ ]*(.*$test_case_zk_prefix" "$test_case" || echo "Replicated.*MergeTree should contain '$test_case_zk_prefix' in zookeeper path to avoid overlaps ($test_case)"
|
||||
;;
|
||||
*.sql|*.sql.j2)
|
||||
test_case_zk_prefix="\({database}\|currentDatabase()\|{uuid}\|{default_path_test}\)"
|
||||
grep -q -e "Replicated.*MergeTree[ ]*(.*$test_case_zk_prefix" "$test_case" || echo "Replicated.*MergeTree should contain '$test_case_zk_prefix' in zookeeper path to avoid overlaps ($test_case)"
|
||||
;;
|
||||
*.py)
|
||||
# Right now there are no such tests anyway
|
||||
echo "No ReplicatedMergeTree style check for *.py ($test_case)"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# The stateful directory should only contain the tests that depend on the test dataset (hits or visits).
|
||||
find $ROOT_PATH/tests/queries/1_stateful -name '*.sql' -or -name '*.sh' | grep -v '00076_system_columns_bytes' | xargs -I{} bash -c 'grep -q -P "hits|visits" "{}" || echo "The test {} does not depend on the test dataset (hits or visits table) and should be located in the 0_stateless directory. You can also add an exception to the check-style script."'
|
||||
|
||||
# Check for existence of __init__.py files
|
||||
for i in "${ROOT_PATH}"/tests/integration/test_*; do FILE="${i}/__init__.py"; [ ! -f "${FILE}" ] && echo "${FILE} should exist for every integration test"; done
|
||||
|
||||
# Check for executable bit on non-executable files
|
||||
find $ROOT_PATH/{src,base,programs,utils,tests,docs,cmake} '(' -name '*.cpp' -or -name '*.h' -or -name '*.sql' -or -name '*.j2' -or -name '*.xml' -or -name '*.reference' -or -name '*.txt' -or -name '*.md' ')' -and -executable | grep -P '.' && echo "These files should not be executable."
|
||||
|
||||
# Check for BOM
|
||||
find $ROOT_PATH/{src,base,programs,utils,tests,docs,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xEF\xBB\xBF' | grep -P '.' && echo "Files should not have UTF-8 BOM"
|
||||
find $ROOT_PATH/{src,base,programs,utils,tests,docs,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFF\xFE' | grep -P '.' && echo "Files should not have UTF-16LE BOM"
|
||||
find $ROOT_PATH/{src,base,programs,utils,tests,docs,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFE\xFF' | grep -P '.' && echo "Files should not have UTF-16BE BOM"
|
||||
|
||||
# Conflict markers
|
||||
find $ROOT_PATH/{src,base,programs,utils,tests,docs,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' |
|
||||
xargs grep -P '^(<<<<<<<|=======|>>>>>>>)$' | grep -P '.' && echo "Conflict markers are found in files"
|
||||
|
||||
# DOS/Windows newlines
|
||||
find $ROOT_PATH/{base,src,programs,utils,docs} -name '*.md' -or -name '*.h' -or -name '*.cpp' -or -name '*.js' -or -name '*.py' -or -name '*.html' | xargs grep -l -P '\r$' && echo "^ Files contain DOS/Windows newlines (\r\n instead of \n)."
|
||||
|
||||
# # workflows check
|
||||
# act --list --directory="$ROOT_PATH" 1>/dev/null 2>&1 || act --list --directory="$ROOT_PATH" 2>&1
|
||||
# actionlint -ignore 'reusable workflow call.+' || :
|
37
ci/jobs/scripts/check_style/double_whitespaces.pl
Executable file
@ -0,0 +1,37 @@
|
||||
#!/usr/bin/perl
|
||||
|
||||
use strict;
|
||||
|
||||
# Find double whitespace such as "a,  b,  c" that looks very ugly and annoying.
|
||||
# But skip double whitespaces if they are used as an alignment - by comparing to surrounding lines.
|
||||
|
||||
my $ret = 0;
|
||||
|
||||
foreach my $file (@ARGV)
|
||||
{
|
||||
my @array;
|
||||
|
||||
open (FH,'<',$file);
|
||||
while (<FH>)
|
||||
{
|
||||
push @array, $_;
|
||||
}
|
||||
|
||||
for (my $i = 1; $i < $#array; ++$i)
|
||||
{
|
||||
if ($array[$i] =~ ',( {2,3})[^ /]')
|
||||
{
|
||||
# https://stackoverflow.com/questions/87380/how-can-i-find-the-location-of-a-regex-match-in-perl
|
||||
|
||||
if ((substr($array[$i - 1], $+[1] - 1, 2) !~ /^[ -][^ ]$/) # whitespaces are not part of alignment
|
||||
&& (substr($array[$i + 1], $+[1] - 1, 2) !~ /^[ -][^ ]$/)
|
||||
&& $array[$i] !~ /(-?\d+\w*,\s+){3,}/) # this is not a number table like { 10, -1, 2 }
|
||||
{
|
||||
print($file . ":" . ($i + 1) . $array[$i]);
|
||||
$ret = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
exit $ret;
|
284
ci/jobs/scripts/functional_tests_results.py
Executable file
@ -0,0 +1,284 @@
|
||||
import dataclasses
|
||||
from typing import List
|
||||
|
||||
from praktika.environment import Environment
|
||||
from praktika.result import Result
|
||||
|
||||
OK_SIGN = "[ OK "
|
||||
FAIL_SIGN = "[ FAIL "
|
||||
TIMEOUT_SIGN = "[ Timeout! "
|
||||
UNKNOWN_SIGN = "[ UNKNOWN "
|
||||
SKIPPED_SIGN = "[ SKIPPED "
|
||||
HUNG_SIGN = "Found hung queries in processlist"
|
||||
SERVER_DIED_SIGN = "Server died, terminating all processes"
|
||||
SERVER_DIED_SIGN2 = "Server does not respond to health check"
|
||||
DATABASE_SIGN = "Database: "
|
||||
|
||||
SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]
|
||||
|
||||
RETRIES_SIGN = "Some tests were restarted"
|
||||
|
||||
|
||||
# def write_results(results_file, status_file, results, status):
|
||||
# with open(results_file, "w", encoding="utf-8") as f:
|
||||
# out = csv.writer(f, delimiter="\t")
|
||||
# out.writerows(results)
|
||||
# with open(status_file, "w", encoding="utf-8") as f:
|
||||
# out = csv.writer(f, delimiter="\t")
|
||||
# out.writerow(status)
|
||||
|
||||
BROKEN_TESTS_ANALYZER_TECH_DEBT = [
|
||||
"01624_soft_constraints",
|
||||
# Check after ConstantNode refactoring
|
||||
"02944_variant_as_common_type",
|
||||
]
|
||||
|
||||
|
||||
class FTResultsProcessor:
|
||||
@dataclasses.dataclass
|
||||
class Summary:
|
||||
total: int
|
||||
skipped: int
|
||||
unknown: int
|
||||
failed: int
|
||||
success: int
|
||||
test_results: List[Result]
|
||||
hung: bool = False
|
||||
server_died: bool = False
|
||||
retries: bool = False
|
||||
success_finish: bool = False
|
||||
test_end: bool = True
|
||||
|
||||
def __init__(self, wd):
|
||||
self.tests_output_file = f"{wd}/test_result.txt"
|
||||
# self.test_results_parsed_file = f"{wd}/test_result.tsv"
|
||||
# self.status_file = f"{wd}/check_status.tsv"
|
||||
self.broken_tests = BROKEN_TESTS_ANALYZER_TECH_DEBT
|
||||
|
||||
def _process_test_output(self):
|
||||
total = 0
|
||||
skipped = 0
|
||||
unknown = 0
|
||||
failed = 0
|
||||
success = 0
|
||||
hung = False
|
||||
server_died = False
|
||||
retries = False
|
||||
success_finish = False
|
||||
test_results = []
|
||||
test_end = True
|
||||
|
||||
with open(self.tests_output_file, "r", encoding="utf-8") as test_file:
|
||||
for line in test_file:
|
||||
original_line = line
|
||||
line = line.strip()
|
||||
|
||||
if any(s in line for s in SUCCESS_FINISH_SIGNS):
|
||||
success_finish = True
|
||||
# Ignore hung check report, since it may be quite large.
|
||||
# (and may break the python parser, which has a limit of 128KiB for each row).
|
||||
if HUNG_SIGN in line:
|
||||
hung = True
|
||||
break
|
||||
if SERVER_DIED_SIGN in line or SERVER_DIED_SIGN2 in line:
|
||||
server_died = True
|
||||
if RETRIES_SIGN in line:
|
||||
retries = True
|
||||
if any(
|
||||
sign in line
|
||||
for sign in (OK_SIGN, FAIL_SIGN, UNKNOWN_SIGN, SKIPPED_SIGN)
|
||||
):
|
||||
test_name = line.split(" ")[2].split(":")[0]
|
||||
|
||||
test_time = ""
|
||||
try:
|
||||
time_token = line.split("]")[1].strip().split()[0]
|
||||
float(time_token)
|
||||
test_time = time_token
|
||||
except:
|
||||
pass
|
||||
|
||||
total += 1
|
||||
if TIMEOUT_SIGN in line:
|
||||
if test_name in self.broken_tests:
|
||||
success += 1
|
||||
test_results.append((test_name, "BROKEN", test_time, []))
|
||||
else:
|
||||
failed += 1
|
||||
test_results.append((test_name, "Timeout", test_time, []))
|
||||
elif FAIL_SIGN in line:
|
||||
if test_name in self.broken_tests:
|
||||
success += 1
|
||||
test_results.append((test_name, "BROKEN", test_time, []))
|
||||
else:
|
||||
failed += 1
|
||||
test_results.append((test_name, "FAIL", test_time, []))
|
||||
elif UNKNOWN_SIGN in line:
|
||||
unknown += 1
|
||||
test_results.append((test_name, "FAIL", test_time, []))
|
||||
elif SKIPPED_SIGN in line:
|
||||
skipped += 1
|
||||
test_results.append((test_name, "SKIPPED", test_time, []))
|
||||
else:
|
||||
if OK_SIGN in line and test_name in self.broken_tests:
|
||||
skipped += 1
|
||||
test_results.append(
|
||||
(
|
||||
test_name,
|
||||
"NOT_FAILED",
|
||||
test_time,
|
||||
[
|
||||
"This test passed. Update analyzer_tech_debt.txt.\n"
|
||||
],
|
||||
)
|
||||
)
|
||||
else:
|
||||
success += int(OK_SIGN in line)
|
||||
test_results.append((test_name, "OK", test_time, []))
|
||||
test_end = False
|
||||
elif (
|
||||
len(test_results) > 0
|
||||
and test_results[-1][1] == "FAIL"
|
||||
and not test_end
|
||||
):
|
||||
test_results[-1][3].append(original_line)
|
||||
# The database is printed after everything else in case of failures,
|
||||
# so this is a stop marker for capturing test output.
|
||||
#
|
||||
# And it is handled after everything else to include the line with the database in the report.
|
||||
if DATABASE_SIGN in line:
|
||||
test_end = True
|
||||
|
||||
test_results = [
|
||||
Result(
|
||||
name=test[0],
|
||||
status=test[1],
|
||||
start_time=None,
|
||||
duration=float(test[2]),
|
||||
info="".join(test[3])[:8192],
|
||||
)
|
||||
for test in test_results
|
||||
]
|
||||
|
||||
s = self.Summary(
|
||||
total=total,
|
||||
skipped=skipped,
|
||||
unknown=unknown,
|
||||
failed=failed,
|
||||
success=success,
|
||||
test_results=test_results,
|
||||
hung=hung,
|
||||
server_died=server_died,
|
||||
success_finish=success_finish,
|
||||
retries=retries,
|
||||
)
|
||||
|
||||
return s
|
||||
|
||||
def run(self):
|
||||
state = Result.Status.SUCCESS
|
||||
s = self._process_test_output()
|
||||
test_results = s.test_results
|
||||
|
||||
# # Check test_results.tsv for sanitizer asserts, crashes and other critical errors.
|
||||
# # If the file is present, it's expected to be generated by stress_test.lib check for critical errors
|
||||
# # In the end this file will be fully regenerated, including both results from critical errors check and
|
||||
# # functional test results.
|
||||
# if test_results_path and os.path.exists(test_results_path):
|
||||
# with open(test_results_path, "r", encoding="utf-8") as test_results_file:
|
||||
# existing_test_results = list(
|
||||
# csv.reader(test_results_file, delimiter="\t")
|
||||
# )
|
||||
# for test in existing_test_results:
|
||||
# if len(test) < 2:
|
||||
# unknown += 1
|
||||
# else:
|
||||
# test_results.append(test)
|
||||
#
|
||||
# if test[1] != "OK":
|
||||
# failed += 1
|
||||
# else:
|
||||
# success += 1
|
||||
|
||||
# is_flaky_check = 1 < int(os.environ.get("NUM_TRIES", 1))
|
||||
# logging.info("Is flaky check: %s", is_flaky_check)
|
||||
# # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately)
|
||||
# # But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped.
|
||||
# if failed != 0 or unknown != 0 or (success == 0 and (not is_flaky_check)):
|
||||
if s.failed != 0 or s.unknown != 0:
|
||||
state = Result.Status.FAILED
|
||||
|
||||
if s.hung:
|
||||
state = Result.Status.FAILED
|
||||
test_results.append(
|
||||
Result("Some queries hung", "FAIL", info="Some queries hung")
|
||||
)
|
||||
elif s.server_died:
|
||||
state = Result.Status.FAILED
|
||||
# When the ClickHouse server crashes, some tests are still running
|
||||
# and fail because they cannot connect to the server
|
||||
for result in test_results:
|
||||
if result.status == "FAIL":
|
||||
result.status = "SERVER_DIED"
|
||||
test_results.append(Result("Server died", "FAIL", info="Server died"))
|
||||
elif not s.success_finish:
|
||||
state = Result.Status.FAILED
|
||||
test_results.append(
|
||||
Result("Tests are not finished", "FAIL", info="Tests are not finished")
|
||||
)
|
||||
elif s.retries:
|
||||
test_results.append(
|
||||
Result("Some tests restarted", "SKIPPED", info="Some tests restarted")
|
||||
)
|
||||
else:
|
||||
pass
|
||||
|
||||
# TODO: !!!
|
||||
# def test_result_comparator(item):
|
||||
# # sort by status then by check name
|
||||
# order = {
|
||||
# "FAIL": 0,
|
||||
# "SERVER_DIED": 1,
|
||||
# "Timeout": 2,
|
||||
# "NOT_FAILED": 3,
|
||||
# "BROKEN": 4,
|
||||
# "OK": 5,
|
||||
# "SKIPPED": 6,
|
||||
# }
|
||||
# return order.get(item[1], 10), str(item[0]), item[1]
|
||||
#
|
||||
# test_results.sort(key=test_result_comparator)
|
||||
|
||||
return Result.create_from(
|
||||
name=Environment.JOB_NAME,
|
||||
results=test_results,
|
||||
status=state,
|
||||
files=[self.tests_output_file],
|
||||
with_info_from_results=False,
|
||||
)
|
||||
|
||||
|
||||
# if __name__ == "__main__":
|
||||
# logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
# parser = argparse.ArgumentParser(
|
||||
# description="ClickHouse script for parsing results of functional tests"
|
||||
# )
|
||||
#
|
||||
# parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
|
||||
# parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
|
||||
# args = parser.parse_args()
|
||||
#
|
||||
# broken_tests = []
|
||||
# state, description, test_results = process_result(
|
||||
# args.in_results_dir,
|
||||
# broken_tests,
|
||||
# args.in_test_result_file,
|
||||
# args.in_results_file,
|
||||
# )
|
||||
# logging.info("Result parsed")
|
||||
# status = (state, description)
|
||||
#
|
||||
#
|
||||
#
|
||||
# write_results(args.out_results_file, args.out_status_file, test_results, status)
|
||||
# logging.info("Result written")
|
5
ci/praktika/__init__.py
Normal file
@ -0,0 +1,5 @@
|
||||
from .artifact import Artifact
|
||||
from .docker import Docker
|
||||
from .job import Job
|
||||
from .secret import Secret
|
||||
from .workflow import Workflow
|
94
ci/praktika/__main__.py
Normal file
@ -0,0 +1,94 @@
|
||||
import argparse
|
||||
import sys
|
||||
|
||||
from praktika.html_prepare import Html
|
||||
from praktika.utils import Utils
|
||||
from praktika.validator import Validator
|
||||
from praktika.yaml_generator import YamlGenerator
|
||||
|
||||
|
||||
def create_parser():
|
||||
parser = argparse.ArgumentParser(prog="python3 -m praktika")
|
||||
|
||||
subparsers = parser.add_subparsers(dest="command", help="Available subcommands")
|
||||
|
||||
run_parser = subparsers.add_parser("run", help="Job Runner")
|
||||
run_parser.add_argument("--job", help="Job Name", type=str, required=True)
|
||||
run_parser.add_argument(
|
||||
"--workflow",
|
||||
help="Workflow Name (required if job name is not uniq per config)",
|
||||
type=str,
|
||||
default="",
|
||||
)
|
||||
run_parser.add_argument(
|
||||
"--no-docker",
|
||||
help="Do not run job in docker even if job config says so, for local test",
|
||||
action="store_true",
|
||||
)
|
||||
run_parser.add_argument(
|
||||
"--docker",
|
||||
help="Custom docker image for job run, for local test",
|
||||
type=str,
|
||||
default="",
|
||||
)
|
||||
run_parser.add_argument(
|
||||
"--param",
|
||||
help="Custom parameter to pass into a job script, it's up to job script how to use it, for local test",
|
||||
type=str,
|
||||
default=None,
|
||||
)
|
||||
run_parser.add_argument(
|
||||
"--ci",
|
||||
help="When not set - dummy env will be generated, for local test",
|
||||
action="store_true",
|
||||
default="",
|
||||
)
|
||||
|
||||
_yaml_parser = subparsers.add_parser("yaml", help="Generates Yaml Workflows")
|
||||
|
||||
_html_parser = subparsers.add_parser("html", help="Uploads HTML page for reports")
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = create_parser()
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.command == "yaml":
|
||||
Validator().validate()
|
||||
YamlGenerator().generate()
|
||||
elif args.command == "html":
|
||||
Html.prepare()
|
||||
elif args.command == "run":
|
||||
from praktika.mangle import _get_workflows
|
||||
from praktika.runner import Runner
|
||||
|
||||
workflows = _get_workflows(name=args.workflow or None)
|
||||
job_workflow_pairs = []
|
||||
for workflow in workflows:
|
||||
job = workflow.find_job(args.job, lazy=True)
|
||||
if job:
|
||||
job_workflow_pairs.append((job, workflow))
|
||||
if not job_workflow_pairs:
|
||||
Utils.raise_with_error(
|
||||
f"Failed to find job [{args.job}] workflow [{args.workflow}]"
|
||||
)
|
||||
elif len(job_workflow_pairs) > 1:
|
||||
Utils.raise_with_error(
|
||||
f"More than one job [{args.job}] found - try specifying workflow name with --workflow"
|
||||
)
|
||||
else:
|
||||
job, workflow = job_workflow_pairs[0][0], job_workflow_pairs[0][1]
|
||||
print(f"Going to run job [{job.name}], workflow [{workflow.name}]")
|
||||
Runner().run(
|
||||
workflow=workflow,
|
||||
job=job,
|
||||
docker=args.docker,
|
||||
dummy_env=not args.ci,
|
||||
no_docker=args.no_docker,
|
||||
param=args.param,
|
||||
)
|
||||
else:
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
198
ci/praktika/_environment.py
Normal file
@ -0,0 +1,198 @@
|
||||
import dataclasses
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
from typing import Any, Dict, List, Type
|
||||
|
||||
from praktika import Workflow
|
||||
from praktika._settings import _Settings
|
||||
from praktika.utils import MetaClasses, T
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class _Environment(MetaClasses.Serializable):
|
||||
WORKFLOW_NAME: str
|
||||
JOB_NAME: str
|
||||
REPOSITORY: str
|
||||
BRANCH: str
|
||||
SHA: str
|
||||
PR_NUMBER: int
|
||||
EVENT_TYPE: str
|
||||
JOB_OUTPUT_STREAM: str
|
||||
EVENT_FILE_PATH: str
|
||||
CHANGE_URL: str
|
||||
COMMIT_URL: str
|
||||
BASE_BRANCH: str
|
||||
RUN_ID: str
|
||||
RUN_URL: str
|
||||
INSTANCE_TYPE: str
|
||||
INSTANCE_ID: str
|
||||
INSTANCE_LIFE_CYCLE: str
|
||||
LOCAL_RUN: bool = False
|
||||
PARAMETER: Any = None
|
||||
REPORT_INFO: List[str] = dataclasses.field(default_factory=list)
|
||||
name = "environment"
|
||||
|
||||
@classmethod
|
||||
def file_name_static(cls, _name=""):
|
||||
return f"{_Settings.TEMP_DIR}/{cls.name}.json"
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls: Type[T], obj: Dict[str, Any]) -> T:
|
||||
JOB_OUTPUT_STREAM = os.getenv("GITHUB_OUTPUT", "")
|
||||
obj["JOB_OUTPUT_STREAM"] = JOB_OUTPUT_STREAM
|
||||
if "PARAMETER" in obj:
|
||||
obj["PARAMETER"] = _to_object(obj["PARAMETER"])
|
||||
return cls(**obj)
|
||||
|
||||
def add_info(self, info):
|
||||
self.REPORT_INFO.append(info)
|
||||
self.dump()
|
||||
|
||||
@classmethod
|
||||
def get(cls):
|
||||
if Path(cls.file_name_static()).is_file():
|
||||
return cls.from_fs("environment")
|
||||
else:
|
||||
print("WARNING: Environment: get from env")
|
||||
env = cls.from_env()
|
||||
env.dump()
|
||||
return env
|
||||
|
||||
def set_job_name(self, job_name):
|
||||
self.JOB_NAME = job_name
|
||||
self.dump()
|
||||
return self
|
||||
|
||||
@staticmethod
|
||||
def get_needs_statuses():
|
||||
if Path(_Settings.WORKFLOW_STATUS_FILE).is_file():
|
||||
with open(_Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f:
|
||||
return json.load(f)
|
||||
else:
|
||||
print(
|
||||
f"ERROR: Status file [{_Settings.WORKFLOW_STATUS_FILE}] does not exist"
|
||||
)
|
||||
raise RuntimeError()
|
||||
|
||||
@classmethod
|
||||
def from_env(cls) -> "_Environment":
|
||||
WORKFLOW_NAME = os.getenv("GITHUB_WORKFLOW", "")
|
||||
JOB_NAME = os.getenv("JOB_NAME", "")
|
||||
REPOSITORY = os.getenv("GITHUB_REPOSITORY", "")
|
||||
BRANCH = os.getenv("GITHUB_HEAD_REF", "")
|
||||
|
||||
EVENT_FILE_PATH = os.getenv("GITHUB_EVENT_PATH", "")
|
||||
JOB_OUTPUT_STREAM = os.getenv("GITHUB_OUTPUT", "")
|
||||
RUN_ID = os.getenv("GITHUB_RUN_ID", "0")
|
||||
RUN_URL = f"https://github.com/{REPOSITORY}/actions/runs/{RUN_ID}"
|
||||
BASE_BRANCH = os.getenv("GITHUB_BASE_REF", "")
|
||||
|
||||
if EVENT_FILE_PATH:
|
||||
with open(EVENT_FILE_PATH, "r", encoding="utf-8") as f:
|
||||
github_event = json.load(f)
|
||||
if "pull_request" in github_event:
|
||||
EVENT_TYPE = Workflow.Event.PULL_REQUEST
|
||||
PR_NUMBER = github_event["pull_request"]["number"]
|
||||
SHA = github_event["pull_request"]["head"]["sha"]
|
||||
CHANGE_URL = github_event["pull_request"]["html_url"]
|
||||
COMMIT_URL = CHANGE_URL + f"/commits/{SHA}"
|
||||
elif "commits" in github_event:
|
||||
EVENT_TYPE = Workflow.Event.PUSH
|
||||
SHA = github_event["after"]
|
||||
CHANGE_URL = github_event["head_commit"]["url"] # commit url
|
||||
PR_NUMBER = 0
|
||||
COMMIT_URL = CHANGE_URL
|
||||
else:
|
||||
assert False, "TODO: not supported"
|
||||
else:
|
||||
print("WARNING: Local execution - dummy Environment will be generated")
|
||||
SHA = "TEST"
|
||||
PR_NUMBER = -1
|
||||
EVENT_TYPE = Workflow.Event.PUSH
|
||||
CHANGE_URL = ""
|
||||
COMMIT_URL = ""
|
||||
|
||||
INSTANCE_TYPE = (
|
||||
os.getenv("INSTANCE_TYPE", None)
|
||||
# or Shell.get_output("ec2metadata --instance-type")
|
||||
or ""
|
||||
)
|
||||
INSTANCE_ID = (
|
||||
os.getenv("INSTANCE_ID", None)
|
||||
# or Shell.get_output("ec2metadata --instance-id")
|
||||
or ""
|
||||
)
|
||||
INSTANCE_LIFE_CYCLE = (
|
||||
os.getenv("INSTANCE_LIFE_CYCLE", None)
|
||||
# or Shell.get_output(
|
||||
# "curl -s --fail http://169.254.169.254/latest/meta-data/instance-life-cycle"
|
||||
# )
|
||||
or ""
|
||||
)
|
||||
|
||||
return _Environment(
|
||||
WORKFLOW_NAME=WORKFLOW_NAME,
|
||||
JOB_NAME=JOB_NAME,
|
||||
REPOSITORY=REPOSITORY,
|
||||
BRANCH=BRANCH,
|
||||
EVENT_FILE_PATH=EVENT_FILE_PATH,
|
||||
JOB_OUTPUT_STREAM=JOB_OUTPUT_STREAM,
|
||||
SHA=SHA,
|
||||
EVENT_TYPE=EVENT_TYPE,
|
||||
PR_NUMBER=PR_NUMBER,
|
||||
RUN_ID=RUN_ID,
|
||||
CHANGE_URL=CHANGE_URL,
|
||||
COMMIT_URL=COMMIT_URL,
|
||||
RUN_URL=RUN_URL,
|
||||
BASE_BRANCH=BASE_BRANCH,
|
||||
INSTANCE_TYPE=INSTANCE_TYPE,
|
||||
INSTANCE_ID=INSTANCE_ID,
|
||||
INSTANCE_LIFE_CYCLE=INSTANCE_LIFE_CYCLE,
|
||||
REPORT_INFO=[],
|
||||
)
|
||||
|
||||
def get_s3_prefix(self, latest=False):
|
||||
return self.get_s3_prefix_static(self.PR_NUMBER, self.BRANCH, self.SHA, latest)
|
||||
|
||||
@classmethod
|
||||
def get_s3_prefix_static(cls, pr_number, branch, sha, latest=False):
|
||||
prefix = ""
|
||||
if pr_number > 0:
|
||||
prefix += f"{pr_number}"
|
||||
else:
|
||||
prefix += f"{branch}"
|
||||
if latest:
|
||||
prefix += f"/latest"
|
||||
elif sha:
|
||||
prefix += f"/{sha}"
|
||||
return prefix
|
||||
|
||||
# TODO: find a better place for the function. This file should not import praktika.settings
|
||||
# as it requires reading the user's config; that's why the imports are nested inside the function
|
||||
def get_report_url(self):
|
||||
import urllib
|
||||
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Utils
|
||||
|
||||
path = Settings.HTML_S3_PATH
|
||||
for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items():
|
||||
if bucket in path:
|
||||
path = path.replace(bucket, endpoint)
|
||||
break
|
||||
REPORT_URL = f"https://{path}/{Path(Settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}"
|
||||
return REPORT_URL
|
||||
|
||||
def is_local_run(self):
|
||||
return self.LOCAL_RUN
|
||||
|
||||
|
||||
def _to_object(data):
|
||||
if isinstance(data, dict):
|
||||
return SimpleNamespace(**{k: _to_object(v) for k, v in data.items()})
|
||||
elif isinstance(data, list):
|
||||
return [_to_object(i) for i in data]
|
||||
else:
|
||||
return data
|
124
ci/praktika/_settings.py
Normal file
@ -0,0 +1,124 @@
|
||||
import dataclasses
|
||||
from pathlib import Path
|
||||
from typing import Dict, Iterable, List, Optional
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class _Settings:
|
||||
######################################
|
||||
# Pipeline generation settings #
|
||||
######################################
|
||||
CI_PATH = "./ci"
|
||||
WORKFLOW_PATH_PREFIX: str = "./.github/workflows"
|
||||
WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows"
|
||||
SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings"
|
||||
CI_CONFIG_JOB_NAME = "Config Workflow"
|
||||
DOCKER_BUILD_JOB_NAME = "Docker Builds"
|
||||
FINISH_WORKFLOW_JOB_NAME = "Finish Workflow"
|
||||
READY_FOR_MERGE_STATUS_NAME = "Ready for Merge"
|
||||
CI_CONFIG_RUNS_ON: Optional[List[str]] = None
|
||||
DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None
|
||||
VALIDATE_FILE_PATHS: bool = True
|
||||
|
||||
######################################
|
||||
# Runtime Settings #
|
||||
######################################
|
||||
MAX_RETRIES_S3 = 3
|
||||
MAX_RETRIES_GH = 3
|
||||
|
||||
######################################
|
||||
# S3 (artifact storage) settings #
|
||||
######################################
|
||||
S3_ARTIFACT_PATH: str = ""
|
||||
|
||||
######################################
|
||||
# CI workspace settings #
|
||||
######################################
|
||||
TEMP_DIR: str = "/tmp/praktika"
|
||||
OUTPUT_DIR: str = f"{TEMP_DIR}/output"
|
||||
INPUT_DIR: str = f"{TEMP_DIR}/input"
|
||||
PYTHON_INTERPRETER: str = "python3"
|
||||
PYTHON_PACKET_MANAGER: str = "pip3"
|
||||
PYTHON_VERSION: str = "3.9"
|
||||
INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False
|
||||
INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt"
|
||||
ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json"
|
||||
RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log"
|
||||
|
||||
SECRET_GH_APP_ID: str = "GH_APP_ID"
|
||||
SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY"
|
||||
|
||||
ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh"
|
||||
WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json"
|
||||
|
||||
######################################
|
||||
# CI Cache settings #
|
||||
######################################
|
||||
CACHE_VERSION: int = 1
|
||||
CACHE_DIGEST_LEN: int = 20
|
||||
CACHE_S3_PATH: str = ""
|
||||
CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache"
|
||||
|
||||
######################################
|
||||
# Report settings #
|
||||
######################################
|
||||
HTML_S3_PATH: str = ""
|
||||
HTML_PAGE_FILE: str = "./praktika/json.html"
|
||||
TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"])
|
||||
S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None
|
||||
|
||||
DOCKERHUB_USERNAME: str = ""
|
||||
DOCKERHUB_SECRET: str = ""
|
||||
DOCKER_WD: str = "/wd"
|
||||
|
||||
######################################
|
||||
# CI DB Settings #
|
||||
######################################
|
||||
SECRET_CI_DB_URL: str = "CI_DB_URL"
|
||||
SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD"
|
||||
CI_DB_DB_NAME = ""
|
||||
CI_DB_TABLE_NAME = ""
|
||||
CI_DB_INSERT_TIMEOUT_SEC = 5
|
||||
|
||||
|
||||
_USER_DEFINED_SETTINGS = [
|
||||
"S3_ARTIFACT_PATH",
|
||||
"CACHE_S3_PATH",
|
||||
"HTML_S3_PATH",
|
||||
"S3_BUCKET_TO_HTTP_ENDPOINT",
|
||||
"TEXT_CONTENT_EXTENSIONS",
|
||||
"TEMP_DIR",
|
||||
"OUTPUT_DIR",
|
||||
"INPUT_DIR",
|
||||
"CI_CONFIG_RUNS_ON",
|
||||
"DOCKER_BUILD_RUNS_ON",
|
||||
"CI_CONFIG_JOB_NAME",
|
||||
"PYTHON_INTERPRETER",
|
||||
"PYTHON_VERSION",
|
||||
"PYTHON_PACKET_MANAGER",
|
||||
"INSTALL_PYTHON_FOR_NATIVE_JOBS",
|
||||
"INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS",
|
||||
"MAX_RETRIES_S3",
|
||||
"MAX_RETRIES_GH",
|
||||
"VALIDATE_FILE_PATHS",
|
||||
"DOCKERHUB_USERNAME",
|
||||
"DOCKERHUB_SECRET",
|
||||
"READY_FOR_MERGE_STATUS_NAME",
|
||||
"SECRET_CI_DB_URL",
|
||||
"SECRET_CI_DB_PASSWORD",
|
||||
"CI_DB_DB_NAME",
|
||||
"CI_DB_TABLE_NAME",
|
||||
"CI_DB_INSERT_TIMEOUT_SEC",
|
||||
"SECRET_GH_APP_PEM_KEY",
|
||||
"SECRET_GH_APP_ID",
|
||||
]
|
||||
|
||||
|
||||
class GHRunners:
|
||||
ubuntu = "ubuntu-latest"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
for setting in _USER_DEFINED_SETTINGS:
|
||||
print(_Settings().__getattribute__(setting))
|
||||
# print(dataclasses.asdict(_Settings()))
|
33
ci/praktika/artifact.py
Normal file
@ -0,0 +1,33 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
class Artifact:
|
||||
class Type:
|
||||
GH = "github"
|
||||
S3 = "s3"
|
||||
PHONY = "phony"
|
||||
|
||||
@dataclass
|
||||
class Config:
|
||||
"""
|
||||
name - artifact name
|
||||
type - artifact type, see Artifact.Type
|
||||
path - file path or glob, e.g. "path/**/[abc]rtifac?/*"
|
||||
"""
|
||||
|
||||
name: str
|
||||
type: str
|
||||
path: str
|
||||
_provided_by: str = ""
|
||||
_s3_path: str = ""
|
||||
|
||||
def is_s3_artifact(self):
|
||||
return self.type == Artifact.Type.S3
|
||||
|
||||
@classmethod
|
||||
def define_artifact(cls, name, type, path):
|
||||
return cls.Config(name=name, type=type, path=path)
|
||||
|
||||
@classmethod
|
||||
def define_gh_artifact(cls, name, path):
|
||||
return cls.define_artifact(name=name, type=cls.Type.GH, path=path)
|
127
ci/praktika/cache.py
Normal file
@ -0,0 +1,127 @@
|
||||
import dataclasses
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from praktika import Artifact, Job, Workflow
|
||||
from praktika._environment import _Environment
|
||||
from praktika.digest import Digest
|
||||
from praktika.s3 import S3
|
||||
from praktika.settings import Settings
|
||||
from praktika.utils import Utils
|
||||
|
||||
|
||||
class Cache:
|
||||
@dataclasses.dataclass
|
||||
class CacheRecord:
|
||||
class Type:
|
||||
SUCCESS = "success"
|
||||
|
||||
type: str
|
||||
sha: str
|
||||
pr_number: int
|
||||
branch: str
|
||||
|
||||
def dump(self, path):
|
||||
with open(path, "w", encoding="utf8") as f:
|
||||
json.dump(dataclasses.asdict(self), f)
|
||||
|
||||
@classmethod
|
||||
def from_fs(cls, path):
|
||||
with open(path, "r", encoding="utf8") as f:
|
||||
return Cache.CacheRecord(**json.load(f))
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, obj):
|
||||
return Cache.CacheRecord(**obj)
|
||||
|
||||
def __init__(self):
|
||||
self.digest = Digest()
|
||||
self.success = {} # type Dict[str, Any]
|
||||
|
||||
@classmethod
|
||||
def push_success_record(cls, job_name, job_digest, sha):
|
||||
type_ = Cache.CacheRecord.Type.SUCCESS
|
||||
record = Cache.CacheRecord(
|
||||
type=type_,
|
||||
sha=sha,
|
||||
pr_number=_Environment.get().PR_NUMBER,
|
||||
branch=_Environment.get().BRANCH,
|
||||
)
|
||||
assert (
|
||||
Settings.CACHE_S3_PATH
|
||||
), f"Setting CACHE_S3_PATH must be defined with enabled CI Cache"
|
||||
record_path = f"{Settings.CACHE_S3_PATH}/v{Settings.CACHE_VERSION}/{Utils.normalize_string(job_name)}/{job_digest}"
|
||||
record_file = Path(Settings.TEMP_DIR) / type_
|
||||
record.dump(record_file)
|
||||
S3.copy_file_to_s3(s3_path=record_path, local_path=record_file)
|
||||
record_file.unlink()
|
||||
|
||||
def fetch_success(self, job_name, job_digest):
|
||||
type_ = Cache.CacheRecord.Type.SUCCESS
|
||||
assert (
|
||||
Settings.CACHE_S3_PATH
|
||||
), f"Setting CACHE_S3_PATH must be defined with enabled CI Cache"
|
||||
record_path = f"{Settings.CACHE_S3_PATH}/v{Settings.CACHE_VERSION}/{Utils.normalize_string(job_name)}/{job_digest}/{type_}"
|
||||
record_file_local_dir = (
|
||||
f"{Settings.CACHE_LOCAL_PATH}/{Utils.normalize_string(job_name)}/"
|
||||
)
|
||||
Path(record_file_local_dir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if S3.head_object(record_path):
|
||||
res = S3.copy_file_from_s3(
|
||||
s3_path=record_path, local_path=record_file_local_dir
|
||||
)
|
||||
else:
|
||||
res = None
|
||||
|
||||
if res:
|
||||
print(f"Cache record found, job [{job_name}], digest [{job_digest}]")
|
||||
self.success[job_name] = True
|
||||
return Cache.CacheRecord.from_fs(Path(record_file_local_dir) / type_)
|
||||
return None
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# test
|
||||
c = Cache()
|
||||
workflow = Workflow.Config(
|
||||
name="TEST",
|
||||
event=Workflow.Event.PULL_REQUEST,
|
||||
jobs=[
|
||||
Job.Config(
|
||||
name="JobA",
|
||||
runs_on=["some"],
|
||||
command="python -m unittest ./ci/tests/example_1/test_example_produce_artifact.py",
|
||||
provides=["greet"],
|
||||
job_requirements=Job.Requirements(
|
||||
python_requirements_txt="./ci/requirements.txt"
|
||||
),
|
||||
digest_config=Job.CacheDigestConfig(
|
||||
# example: use glob to include files
|
||||
include_paths=["./ci/tests/example_1/test_example_consume*.py"],
|
||||
),
|
||||
),
|
||||
Job.Config(
|
||||
name="JobB",
|
||||
runs_on=["some"],
|
||||
command="python -m unittest ./ci/tests/example_1/test_example_consume_artifact.py",
|
||||
requires=["greet"],
|
||||
job_requirements=Job.Requirements(
|
||||
python_requirements_txt="./ci/requirements.txt"
|
||||
),
|
||||
digest_config=Job.CacheDigestConfig(
|
||||
# example: use dir to include files recursively
|
||||
include_paths=["./ci/tests/example_1"],
|
||||
# example: use glob to exclude files from digest
|
||||
exclude_paths=[
|
||||
"./ci/tests/example_1/test_example_consume*",
|
||||
"./**/*.pyc",
|
||||
],
|
||||
),
|
||||
),
|
||||
],
|
||||
artifacts=[Artifact.Config(type="s3", name="greet", path="hello")],
|
||||
enable_cache=True,
|
||||
)
|
||||
for job in workflow.jobs:
|
||||
print(c.digest.calc_job_digest(job))
|
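The __main__ block above only prints job digests. Below is a rough sketch of how fetch_success and push_success_record could pair up around a single job run; it assumes Settings.CACHE_S3_PATH and S3 credentials are configured, and `job`, `sha`, and `run_job` are hypothetical names supplied by the surrounding runner:

cache = Cache()
digest = cache.digest.calc_job_digest(job)           # job: a Job.Config, as in the self-test above
if cache.fetch_success(job.name, digest):
    print(f"skip [{job.name}]: cached success for digest [{digest}]")
else:
    run_job(job)                                      # hypothetical runner call
    Cache.push_success_record(job.name, digest, sha)  # sha: commit under test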
136 ci/praktika/cidb.py Normal file
@ -0,0 +1,136 @@
import copy
import dataclasses
import json
from typing import Optional

import requests

from praktika._environment import _Environment
from praktika.result import Result
from praktika.settings import Settings
from praktika.utils import Utils


class CIDB:
    @dataclasses.dataclass
    class TableRecord:
        pull_request_number: int
        commit_sha: str
        commit_url: str
        check_name: str
        check_status: str
        check_duration_ms: int
        check_start_time: int
        report_url: str
        pull_request_url: str
        base_ref: str
        base_repo: str
        head_ref: str
        head_repo: str
        task_url: str
        instance_type: str
        instance_id: str
        test_name: str
        test_status: str
        test_duration_ms: Optional[int]
        test_context_raw: str

    def __init__(self, url, passwd):
        self.url = url
        self.auth = {
            "X-ClickHouse-User": "default",
            "X-ClickHouse-Key": passwd,
        }

    @classmethod
    def json_data_generator(cls, result: Result):
        env = _Environment.get()
        base_record = cls.TableRecord(
            pull_request_number=env.PR_NUMBER,
            commit_sha=env.SHA,
            commit_url=env.COMMIT_URL,
            check_name=result.name,
            check_status=result.status,
            check_duration_ms=int(result.duration * 1000),
            check_start_time=Utils.timestamp_to_str(result.start_time),
            report_url=env.get_report_url(),
            pull_request_url=env.CHANGE_URL,
            base_ref=env.BASE_BRANCH,
            base_repo=env.REPOSITORY,
            head_ref=env.BRANCH,
            # TODO: remove from table?
            head_repo=env.REPOSITORY,
            # TODO: remove from table?
            task_url="",
            instance_type=",".join([env.INSTANCE_TYPE, env.INSTANCE_LIFE_CYCLE]),
            instance_id=env.INSTANCE_ID,
            test_name="",
            test_status="",
            test_duration_ms=None,
            test_context_raw=result.info,
        )
        yield json.dumps(dataclasses.asdict(base_record))
        for result_ in result.results:
            record = copy.deepcopy(base_record)
            record.test_name = result_.name
            if result_.start_time:
                record.check_start_time = Utils.timestamp_to_str(result_.start_time)
            record.test_status = result_.status
            record.test_duration_ms = int(result_.duration * 1000)
            record.test_context_raw = result_.info
            yield json.dumps(dataclasses.asdict(record))

    def insert(self, result: Result):
        # Create a session object
        params = {
            "database": Settings.CI_DB_DB_NAME,
            "query": f"INSERT INTO {Settings.CI_DB_TABLE_NAME} FORMAT JSONEachRow",
            "date_time_input_format": "best_effort",
            "send_logs_level": "warning",
        }

        session = requests.Session()

        for json_str in self.json_data_generator(result):
            try:
                response = session.post(
                    url=self.url,
                    params=params,
                    data=json_str,
                    headers=self.auth,
                    timeout=Settings.CI_DB_INSERT_TIMEOUT_SEC,
                )
            except Exception as ex:
                raise ex

        session.close()

    def check(self):
        # Create a session object
        params = {
            "database": Settings.CI_DB_DB_NAME,
            "query": "SELECT 1",
        }
        try:
            response = requests.post(
                url=self.url,
                params=params,
                data="",
                headers=self.auth,
                timeout=Settings.CI_DB_INSERT_TIMEOUT_SEC,
            )
            if not response.ok:
                print("ERROR: No connection to CI DB")
                return (
                    False,
                    f"ERROR: No connection to CI DB [{response.status_code}/{response.reason}]",
                )
            if not response.json() == 1:
                print("ERROR: CI DB smoke test failed [select 1 != 1]")
                return (
                    False,
                    f"ERROR: CI DB smoke test failed [select 1 ==> {response.json()}]",
                )
        except Exception as ex:
            print(f"ERROR: Exception [{ex}]")
            return False, f"CIDB: ERROR: Exception [{ex}]"
        return True, ""
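A small smoke-test sketch for the class above. It assumes the CI DB URL and password are exported as environment variables named after the SECRET_CI_DB_URL / SECRET_CI_DB_PASSWORD settings listed near the top of this diff; how praktika actually resolves those secrets is not shown here.

import os

from praktika.cidb import CIDB

cidb = CIDB(
    url=os.environ["SECRET_CI_DB_URL"],          # assumed to be exported by the CI runner
    passwd=os.environ["SECRET_CI_DB_PASSWORD"],
)
ok, info = cidb.check()
print("CI DB reachable" if ok else info)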
112 ci/praktika/digest.py Normal file
@ -0,0 +1,112 @@
import dataclasses
import hashlib
import os
from hashlib import md5
from pathlib import Path
from typing import List

from praktika import Job
from praktika.docker import Docker
from praktika.settings import Settings
from praktika.utils import Utils


class Digest:
    def __init__(self):
        self.digest_cache = {}

    @staticmethod
    def _hash_digest_config(digest_config: Job.CacheDigestConfig) -> str:
        data_dict = dataclasses.asdict(digest_config)
        hash_obj = md5()
        hash_obj.update(str(data_dict).encode())
        hash_string = hash_obj.hexdigest()
        return hash_string

    def calc_job_digest(self, job_config: Job.Config):
        config = job_config.digest_config
        if not config:
            return "f" * Settings.CACHE_DIGEST_LEN

        cache_key = self._hash_digest_config(config)

        if cache_key in self.digest_cache:
            return self.digest_cache[cache_key]

        included_files = Utils.traverse_paths(
            job_config.digest_config.include_paths,
            job_config.digest_config.exclude_paths,
            sorted=True,
        )

        print(
            f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files"
        )
        # Sort files to ensure consistent hash calculation
        included_files.sort()

        # Calculate MD5 hash
        res = ""
        if not included_files:
            res = "f" * Settings.CACHE_DIGEST_LEN
            print(f"NOTE: empty digest config [{config}] - return dummy digest")
        else:
            hash_md5 = hashlib.md5()
            for file_path in included_files:
                res = self._calc_file_digest(file_path, hash_md5)
        assert res
        self.digest_cache[cache_key] = res
        return res

    def calc_docker_digest(
        self,
        docker_config: Docker.Config,
        dependency_configs: List[Docker.Config],
        hash_md5=None,
    ):
        """
        :param hash_md5:
        :param dependency_configs: list of Docker.Config(s) that :param docker_config: depends on
        :param docker_config: Docker.Config to calculate digest for
        :return:
        """
        print(f"Calculate digest for docker [{docker_config.name}]")
        paths = Utils.traverse_path(docker_config.path, sorted=True)
        if not hash_md5:
            hash_md5 = hashlib.md5()

        dependencies = []
        for dependency_name in docker_config.depends_on:
            for dependency_config in dependency_configs:
                if dependency_config.name == dependency_name:
                    print(
                        f"Add docker [{dependency_config.name}] as dependency for docker [{docker_config.name}] digest calculation"
                    )
                    dependencies.append(dependency_config)

        for dependency in dependencies:
            _ = self.calc_docker_digest(dependency, dependency_configs, hash_md5)

        for path in paths:
            _ = self._calc_file_digest(path, hash_md5=hash_md5)

        return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]

    @staticmethod
    def _calc_file_digest(file_path, hash_md5):
        # Resolve file path if it's a symbolic link
        resolved_path = file_path
        if Path(file_path).is_symlink():
            resolved_path = os.path.realpath(file_path)
            if not Path(resolved_path).is_file():
                print(
                    f"WARNING: No valid file resolved by link {file_path} -> {resolved_path} - skipping digest calculation"
                )
                return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]

        with open(resolved_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)

        return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]
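calc_docker_digest folds the digests of dependency images into the dependent image's digest, so a change in a base image invalidates everything built on top of it. A minimal sketch, assuming Docker.Config accepts the name / path / depends_on fields the method reads (the image names and paths are invented):

from praktika.digest import Digest
from praktika.docker import Docker

base = Docker.Config(name="base", path="./ci/docker/base", depends_on=[])
tester = Docker.Config(name="tester", path="./ci/docker/tester", depends_on=["base"])

d = Digest()
# the digest of "tester" changes whenever its own files or those of "base" change
print(d.calc_docker_digest(tester, dependency_configs=[base, tester]))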
Some files were not shown because too many files have changed in this diff.